diff --git a/.github/actions/install-cudnn/action.yml b/.github/actions/install-cudnn/action.yml
index a5afe666ba..e361985294 100644
--- a/.github/actions/install-cudnn/action.yml
+++ b/.github/actions/install-cudnn/action.yml
@@ -46,10 +46,12 @@ runs:
         Get-ChildItem -Path "${env:CUDA_PATH}\lib"
         Get-ChildItem -Path "C:\cudnn\" -Directory | Move-Item -Destination "C:\cudnn\cuda"
         Get-ChildItem -Path "C:\cudnn\"
+        Get-ChildItem -Path "C:\cudnn\cuda"
         Move-Item -Path "C:\cudnn\cuda\bin\cudnn*.dll" -Destination "${env:CUDA_PATH}\bin"
         Move-Item -Path "C:\cudnn\cuda\include\cudnn*.h" -Destination "${env:CUDA_PATH}\include"
         Move-Item -Path "C:\cudnn\cuda\lib\cudnn*.lib" -Destination "${env:CUDA_PATH}\lib\x64"
-        echo "CUDNN_INCLUDE_PATH=""${CUDA_PATH}\include""" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
-        echo "CUDNN_LIBRARY_PATH=""${CUDA_PATH}\lib\x64""" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
+        Move-Item -Path "C:\cudnn\cuda\lib\x64\cudnn*.lib" -Destination "${env:CUDA_PATH}\lib\x64"
+        echo "CUDNN_INCLUDE_PATH=""${env:CUDA_PATH}\include""" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
+        echo "CUDNN_LIBRARY_PATH=""${env:CUDA_PATH}\lib\x64""" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
         Remove-Item -Path cudnn.zip
     shell: pwsh
\ No newline at end of file
diff --git a/.github/actions/install-cudnn/cudnn-url.txt b/.github/actions/install-cudnn/cudnn-url.txt
index ee9cc650fc..9c10c44d98 100644
--- a/.github/actions/install-cudnn/cudnn-url.txt
+++ b/.github/actions/install-cudnn/cudnn-url.txt
@@ -1,8 +1,10 @@
 windows 11.3 https://torch-cdn.mlverse.org/cudnn/cudnn-11.3-windows-x64-v8.2.1.32.zip
 windows 11.7 https://developer.download.nvidia.com/compute/redist/cudnn/v8.5.0/local_installers/11.7/cudnn-windows-x86_64-8.5.0.96_cuda11-archive.zip
 windows 11.8 https://developer.download.nvidia.com/compute/redist/cudnn/v8.7.0/local_installers/11.8/cudnn-windows-x86_64-8.7.0.84_cuda11-archive.zip
+windows 12.4 https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/windows-x86_64/cudnn-windows-x86_64-8.9.7.29_cuda12-archive.zip
 linux 11.3 https://torch-cdn.mlverse.org/cudnn/cudnn-11.3-linux-x64-v8.2.1.32.tgz
 linux 11.6 https://torch-cdn.mlverse.org/cudnn/cudnn-linux-x86_64-8.6.0.163_cuda11-archive.tgz
 linux 10.2 https://torch-cdn.mlverse.org/cudnn/cudnn-10.2-linux-x64-v7.6.5.32.tgz
 linux 11.7 https://torch-cdn.mlverse.org/cudnn/cudnn-linux-x86_64-8.5.0.96_cuda11-archive.tar.xz
-linux 11.8 https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.9.0.131_cuda11-archive.tar.xz
\ No newline at end of file
+linux 11.8 https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.9.0.131_cuda11-archive.tar.xz
+linux 12.4 https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz
\ No newline at end of file
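Note on the file above: cudnn-url.txt is a whitespace-separated lookup of platform, CUDA version, and cuDNN archive URL. A minimal sketch of reading such a table from R (the column names are assumed here; this is not the package's actual install code):

```r
# read the three-column lookup and pick the cuDNN archive for linux + CUDA 12.4
urls <- read.table(
  ".github/actions/install-cudnn/cudnn-url.txt",
  col.names = c("os", "cuda", "url"),
  colClasses = "character"
)
subset(urls, os == "linux" & cuda == "12.4")$url
```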
diff --git a/.github/workflows/lantern.yaml b/.github/workflows/lantern.yaml
index c0edddb299..b631b31dcd 100644
--- a/.github/workflows/lantern.yaml
+++ b/.github/workflows/lantern.yaml
@@ -18,16 +18,16 @@ jobs:
         config:
           # when changing supported versions here, please modify supported versions
           # in install.R
-          - {os: macOS, version: cpu-intel, runner: macos-12}
+          - {os: macOS, version: cpu-intel, runner: macos-13}
           - {os: macOS, version: cpu-m1, runner: [self-hosted, m1]}
 
           - {os: ubuntu, version: cpu, runner: ubuntu-latest}
-          - {os: ubuntu, version: cu11.7, runner: [self-hosted, linux]}
           - {os: ubuntu, version: cu11.8, runner: [self-hosted, linux]}
+          - {os: ubuntu, version: cu12.4, runner: [self-hosted, linux]}
 
-          - {os: windows, version: cpu, runner: windows-2019}
-          - {os: windows, version: cu11.7, runner: windows-2019}
+          - {os: windows, version: cpu, runner: windows-2022}
           - {os: windows, version: cu11.8, runner: windows-2019}
+          - {os: windows, version: cu12.4, runner: windows-2019}
 
         precxx11abi: [0, 1]
@@ -39,16 +39,14 @@ jobs:
         - config: {os: ubuntu}
           precxx11abi: 1
           container: ubuntu:20.04
-
-        # specify the CUDA patch for each major/minor version.
-        # required for cuda installation
-        - config: {version: cu11.7}
-          cuda: 11.7
-          cuda_patch: 1
         - config: {version: cu11.8}
           cuda: 11.8
          cuda_patch: 0
+
+        - config: {version: cu12.4}
+          cuda: 12.4
+          cuda_patch: 1
 
       exclude:
         - config: {os: macOS}
@@ -80,17 +78,18 @@ jobs:
       - name: Install CUDA
         if: ${{matrix.cuda != ''}}
-        uses: Jimver/cuda-toolkit@v0.2.10
+        uses: Jimver/cuda-toolkit@v0.2.18
         id: cuda-toolkit
         with:
           cuda: "${{matrix.cuda}}.${{matrix.cuda_patch}}"
+          log-file-suffix: '${{matrix.cuda}}.${{matrix.cuda_patch}}.${{matrix.precxx11abi}}.txt'
 
       - name: Install CuDNN
         if: ${{ matrix.cuda != '' }}
         uses: ./.github/actions/install-cudnn
         with:
           cuda_version: ${{ matrix.cuda }}
-
+
       - name: Run cmake
         run: |
           cd src/lantern/
diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
index e1996cd610..281ea3b016 100644
--- a/.github/workflows/main.yaml
+++ b/.github/workflows/main.yaml
@@ -32,18 +32,14 @@ jobs:
         - {os: ubuntu, r_version: release, version: cpu, runner: ubuntu-20.04}
         - {os: ubuntu, r_version: release, version: cpu, runner: ubuntu-20.04, precxx11abi: 1}
-        - {os: ubuntu, r_version: release, version: cu11.7, runner: [self-hosted, gpu-local]}
-        - {os: ubuntu, r_version: release, version: cu11.8, runner: [self-hosted, gpu-local]}
+        - {os: ubuntu, r_version: release, version: cu12.4, runner: [self-hosted, gpu-local]}
 
         - {os: windows, r_version: release, version: cpu, runner: windows-latest}
 
       include:
-        - config: {os: ubuntu, version: cu11.7}
-          container: {image: 'nvidia/cuda:11.7.1-cudnn8-devel-ubuntu20.04', options: '--gpus all --runtime=nvidia'}
-
-        - config: {os: ubuntu, version: cu11.8}
-          container: {image: 'nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04', options: '--gpus all --runtime=nvidia'}
+        - config: {os: ubuntu, version: cu12.4}
+          container: {image: 'nvidia/cuda:12.4.1-cudnn-devel-ubuntu20.04', options: '--gpus all --runtime=nvidia'}
 
     runs-on: ${{ matrix.config.runner }}
     container: ${{ matrix.container }}
@@ -79,7 +75,7 @@ jobs:
       - uses: ./.github/actions/setup-r
         with:
           r_version: ${{ matrix.config.r_version}}
-
+
       - name: Setup cmake
         uses: jwlawson/actions-setup-cmake@v1
         if: ${{ env.BUILD_LANTERN == 1}}
@@ -91,16 +87,15 @@ jobs:
           cache: false
           extra-packages: any::rcmdcheck
           needs: check
-
-      - run: |
-          Sys.getenv("PRECXX11ABI")
-          Sys.setenv("PRECXX11ABI" = 1)
-          pkgbuild::build()
-        shell: Rscript {0}
-
+
+      - name: Session info
+        run: |
+          Rscript -e "sessionInfo()"
+
       - uses: r-lib/actions/check-r-package@v2
         with:
           error-on: '"error"'
           args: 'c("--no-multiarch", "--no-manual", "--as-cran")'
+
diff --git a/DESCRIPTION b/DESCRIPTION
index 01b86796a3..feb1ced255 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,7 +1,7 @@
 Package: torch
 Type: Package
 Title: Tensors and Neural Networks with 'GPU' Acceleration
-Version: 0.13.0.9000
+Version: 0.13.0.9001
 Authors@R: c(
     person("Daniel", "Falbel", email = "daniel@rstudio.com", role = c("aut", "cre", "cph")),
     person("Javier", "Luraschi", email = "jluraschi@gmail.com", role = c("aut")),
@@ -11,6 +11,7 @@ Authors@R: c(
     person("Krzysztof", "Joachimiak", role = c("ctb")),
     person("Hamada S.", "Badr", role = c("ctb")),
     person("Sebastian", "Fischer", role = c("ctb")),
+    person("Maximilian", "Pichler", role = c("ctb")),
     person(family = "RStudio", role = c("cph"))
     )
 Description: Provides functionality to define and train neural networks similar to
@@ -43,8 +44,9 @@ Imports:
     glue,
     desc,
     safetensors (>= 0.1.1),
-    jsonlite
-RoxygenNote: 7.3.1
+    jsonlite,
+    scales
+RoxygenNote: 7.3.2
 Roxygen: list(markdown = TRUE)
 Suggests:
     testthat (>= 3.0.0),
@@ -177,3 +179,5 @@ Collate:
     'variable_list.R'
     'with-indices.R'
     'wrapers.R'
+Remotes:
+    RcppCore/Rcpp
diff --git a/NEWS.md b/NEWS.md
index 19583433cd..a52fcd8fd6 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -3,6 +3,7 @@
 ## Bug fixes
 
 - `torch_iinfo()` now support all integer dtypes (#1190 @cregouby)
+- Fixed float key_padding_mask in `nnf_multi_head_attention_forward()` (#1205)
 
 # torch 0.13.0
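For orientation on the NEWS entry above: the fix concerns the key_padding_mask argument of multi-head attention. A minimal sketch of the usual boolean mask at the R level (shapes and values chosen purely for illustration; the referenced bug was about masks passed as floats instead of booleans):

```r
library(torch)

mha <- nn_multihead_attention(embed_dim = 8, num_heads = 2)
q <- torch_randn(5, 2, 8)  # (seq_len, batch, embed_dim)

# TRUE marks key positions attention should ignore, e.g. padding;
# shape is (batch, seq_len)
pad <- torch_tensor(matrix(c(FALSE, FALSE, FALSE, FALSE, TRUE,
                             FALSE, FALSE, FALSE, TRUE,  TRUE),
                           nrow = 2, byrow = TRUE))
out <- mha(q, q, q, key_padding_mask = pad)
```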
diff --git a/R/RcppExports.R b/R/RcppExports.R
index c065c5de94..41beb7cef8 100644
--- a/R/RcppExports.R
+++ b/R/RcppExports.R
@@ -561,6 +561,10 @@ cpp_torch_method_all_self_Tensor_dim_int64_t <- function(self, dim, keepdim) {
     .Call(`_torch_cpp_torch_method_all_self_Tensor_dim_int64_t`, self, dim, keepdim)
 }
 
+cpp_torch_method_all_self_Tensor_dim_IntArrayRef <- function(self, dim, keepdim) {
+    .Call(`_torch_cpp_torch_method_all_self_Tensor_dim_IntArrayRef`, self, dim, keepdim)
+}
+
 cpp_torch_method_all_self_Tensor_dim_Dimname <- function(self, dim, keepdim) {
     .Call(`_torch_cpp_torch_method_all_self_Tensor_dim_Dimname`, self, dim, keepdim)
 }
@@ -573,6 +577,10 @@ cpp_torch_method_any_self_Tensor_dim_int64_t <- function(self, dim, keepdim) {
     .Call(`_torch_cpp_torch_method_any_self_Tensor_dim_int64_t`, self, dim, keepdim)
 }
 
+cpp_torch_method_any_self_Tensor_dim_IntArrayRef <- function(self, dim, keepdim) {
+    .Call(`_torch_cpp_torch_method_any_self_Tensor_dim_IntArrayRef`, self, dim, keepdim)
+}
+
 cpp_torch_method_any_self_Tensor_dim_Dimname <- function(self, dim, keepdim) {
     .Call(`_torch_cpp_torch_method_any_self_Tensor_dim_Dimname`, self, dim, keepdim)
 }
@@ -725,6 +733,10 @@ cpp_torch_method_copysign__self_Tensor_other_Scalar <- function(self, other) {
     .Call(`_torch_cpp_torch_method_copysign__self_Tensor_other_Scalar`, self, other)
 }
 
+cpp_torch_method__lazy_clone_self_Tensor <- function(self) {
+    .Call(`_torch_cpp_torch_method__lazy_clone_self_Tensor`, self)
+}
+
 cpp_torch_method_logical_not_self_Tensor <- function(self) {
     .Call(`_torch_cpp_torch_method_logical_not_self_Tensor`, self)
 }
@@ -1209,8 +1221,8 @@ cpp_torch_method_lcm__self_Tensor_other_Tensor <- function(self, other) {
     .Call(`_torch_cpp_torch_method_lcm__self_Tensor_other_Tensor`, self, other)
 }
 
-cpp_torch_method_index_self_Tensor_indices_constc10Listc10optionalTensor <- function(self, indices) {
-    .Call(`_torch_cpp_torch_method_index_self_Tensor_indices_constc10Listc10optionalTensor`, self, indices)
+cpp_torch_method_index_self_Tensor_indices_constc10ListstdoptionalTensor <- function(self, indices) {
+    .Call(`_torch_cpp_torch_method_index_self_Tensor_indices_constc10ListstdoptionalTensor`, self, indices)
 }
 
 cpp_torch_method_index_copy__self_Tensor_dim_int64_t_index_Tensor_source_Tensor <- function(self, dim, index, source) {
     .Call(`_torch_cpp_torch_method_index_copy__self_Tensor_dim_int64_t_index_Tensor_source_Tensor`, self, dim, index, source)
 }
 
 cpp_torch_method_index_copy_self_Tensor_dim_int64_t_index_Tensor_source_Tensor <- function(self, dim, index, source) {
     .Call(`_torch_cpp_torch_method_index_copy_self_Tensor_dim_int64_t_index_Tensor_source_Tensor`, self, dim, index, source)
 }
@@ -1229,12 +1241,12 @@ cpp_torch_method_index_copy_self_Tensor_dim_Dimname_index_Tensor_source_Tensor <
     .Call(`_torch_cpp_torch_method_index_copy_self_Tensor_dim_Dimname_index_Tensor_source_Tensor`, self, dim, index, source)
 }
 
-cpp_torch_method_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor <- function(self, indices, values, accumulate) {
-    .Call(`_torch_cpp_torch_method_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor`, self, indices, values, accumulate)
+cpp_torch_method_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor <- function(self, indices, values, accumulate) {
+    .Call(`_torch_cpp_torch_method_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor`, self, indices, values, accumulate)
 }
 
-cpp_torch_method_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor <- function(self, indices, values, accumulate) {
-    .Call(`_torch_cpp_torch_method_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor`, self, indices, values, accumulate)
+cpp_torch_method_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor <- function(self, indices, values, accumulate) {
+    .Call(`_torch_cpp_torch_method_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor`, self, indices, values, accumulate)
 }
 
 cpp_torch_method_isclose_self_Tensor_other_Tensor <- function(self, other, rtol, atol, equal_nan) {
     .Call(`_torch_cpp_torch_method_isclose_self_Tensor_other_Tensor`, self, other, rtol, atol, equal_nan)
 }
@@ -1773,6 +1785,10 @@ cpp_torch_method_slice_self_Tensor <- function(self, dim, start, end, step) {
     .Call(`_torch_cpp_torch_method_slice_self_Tensor`, self, dim, start, end, step)
 }
 
+cpp_torch_method_slice_inverse_self_Tensor_src_Tensor <- function(self, src, dim, start, end, step) {
+    .Call(`_torch_cpp_torch_method_slice_inverse_self_Tensor_src_Tensor`, self, src, dim, start, end, step)
+}
+
 cpp_torch_method_slice_scatter_self_Tensor_src_Tensor <- function(self, src, dim, start, end, step) {
     .Call(`_torch_cpp_torch_method_slice_scatter_self_Tensor_src_Tensor`, self, src, dim, start, end, step)
 }
@@ -2025,8 +2041,8 @@ cpp_torch_method__nested_tensor_strides_self_Tensor <- function(self) {
     .Call(`_torch_cpp_torch_method__nested_tensor_strides_self_Tensor`, self)
 }
 
-cpp_torch_method__nested_tensor_offsets_self_Tensor <- function(self) {
-    .Call(`_torch_cpp_torch_method__nested_tensor_offsets_self_Tensor`, self)
+cpp_torch_method__nested_tensor_storage_offsets_self_Tensor <- function(self) {
+    .Call(`_torch_cpp_torch_method__nested_tensor_storage_offsets_self_Tensor`, self)
 }
 
 cpp_torch_method_trunc_self_Tensor <- function(self) {
     .Call(`_torch_cpp_torch_method_trunc_self_Tensor`, self)
 }
@@ -2193,12 +2209,16 @@ cpp_torch_method_sparse_mask_self_Tensor_mask_Tensor <- function(self, mask) {
     .Call(`_torch_cpp_torch_method_sparse_mask_self_Tensor_mask_Tensor`, self, mask)
 }
 
-cpp_torch_method_to_dense_self_Tensor <- function(self, dtype) {
-    .Call(`_torch_cpp_torch_method_to_dense_self_Tensor`, self, dtype)
+cpp_torch_method__sparse_mask_projection_self_Tensor_mask_Tensor <- function(self, mask, accumulate_matches) {
+    .Call(`_torch_cpp_torch_method__sparse_mask_projection_self_Tensor_mask_Tensor`, self, mask, accumulate_matches)
 }
 
-cpp_torch_method__to_dense_self_Tensor <- function(self, dtype) {
-    .Call(`_torch_cpp_torch_method__to_dense_self_Tensor`, self, dtype)
+cpp_torch_method_to_dense_self_Tensor <- function(self, dtype, masked_grad) {
+    .Call(`_torch_cpp_torch_method_to_dense_self_Tensor`, self, dtype, masked_grad)
+}
+
+cpp_torch_method__to_dense_self_Tensor <- function(self, dtype, masked_grad) {
+    .Call(`_torch_cpp_torch_method__to_dense_self_Tensor`, self, dtype, masked_grad)
 }
 
 cpp_torch_method_sparse_dim_self_Tensor <- function(self) {
     .Call(`_torch_cpp_torch_method_sparse_dim_self_Tensor`, self)
 }
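A note for reviewers: R/RcppExports.R is regenerated by `Rcpp::compileAttributes()`, so the hunks in this file are mechanical and simply track the regenerated LibTorch bindings. For orientation, the wrapper pattern for a hypothetical export named `add_one` (not part of torch) looks like:

```r
# C++ side (src/), declared for Rcpp:
# // [[Rcpp::export]]
# torch::Tensor add_one(torch::Tensor self);

# Generated R wrapper in R/RcppExports.R: a thin .Call() into the
# registered native symbol `_torch_add_one`
add_one <- function(self) {
  .Call(`_torch_add_one`, self)
}
```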
@@ -2277,26 +2297,50 @@ cpp_torch_method_to_sparse_self_Tensor_sparse_dim_int64_t <- function(self, spar
     .Call(`_torch_cpp_torch_method_to_sparse_self_Tensor_sparse_dim_int64_t`, self, sparse_dim)
 }
 
+cpp_torch_method__to_sparse_self_Tensor_sparse_dim_int64_t <- function(self, sparse_dim) {
+    .Call(`_torch_cpp_torch_method__to_sparse_self_Tensor_sparse_dim_int64_t`, self, sparse_dim)
+}
+
 cpp_torch_method_to_sparse_self_Tensor <- function(self, layout, blocksize, dense_dim) {
     .Call(`_torch_cpp_torch_method_to_sparse_self_Tensor`, self, layout, blocksize, dense_dim)
 }
 
+cpp_torch_method__to_sparse_self_Tensor <- function(self, layout, blocksize, dense_dim) {
+    .Call(`_torch_cpp_torch_method__to_sparse_self_Tensor`, self, layout, blocksize, dense_dim)
+}
+
 cpp_torch_method_to_sparse_csr_self_Tensor <- function(self, dense_dim) {
     .Call(`_torch_cpp_torch_method_to_sparse_csr_self_Tensor`, self, dense_dim)
 }
 
+cpp_torch_method__to_sparse_csr_self_Tensor <- function(self, dense_dim) {
+    .Call(`_torch_cpp_torch_method__to_sparse_csr_self_Tensor`, self, dense_dim)
+}
+
 cpp_torch_method_to_sparse_csc_self_Tensor <- function(self, dense_dim) {
     .Call(`_torch_cpp_torch_method_to_sparse_csc_self_Tensor`, self, dense_dim)
 }
 
+cpp_torch_method__to_sparse_csc_self_Tensor <- function(self, dense_dim) {
+    .Call(`_torch_cpp_torch_method__to_sparse_csc_self_Tensor`, self, dense_dim)
+}
+
 cpp_torch_method_to_sparse_bsr_self_Tensor_blocksize_IntArrayRef <- function(self, blocksize, dense_dim) {
     .Call(`_torch_cpp_torch_method_to_sparse_bsr_self_Tensor_blocksize_IntArrayRef`, self, blocksize, dense_dim)
 }
 
+cpp_torch_method__to_sparse_bsr_self_Tensor_blocksize_IntArrayRef <- function(self, blocksize, dense_dim) {
+    .Call(`_torch_cpp_torch_method__to_sparse_bsr_self_Tensor_blocksize_IntArrayRef`, self, blocksize, dense_dim)
+}
+
 cpp_torch_method_to_sparse_bsc_self_Tensor_blocksize_IntArrayRef <- function(self, blocksize, dense_dim) {
     .Call(`_torch_cpp_torch_method_to_sparse_bsc_self_Tensor_blocksize_IntArrayRef`, self, blocksize, dense_dim)
 }
 
+cpp_torch_method__to_sparse_bsc_self_Tensor_blocksize_IntArrayRef <- function(self, blocksize, dense_dim) {
+    .Call(`_torch_cpp_torch_method__to_sparse_bsc_self_Tensor_blocksize_IntArrayRef`, self, blocksize, dense_dim)
+}
+
 cpp_torch_method_to_mkldnn_self_Tensor <- function(self, dtype) {
     .Call(`_torch_cpp_torch_method_to_mkldnn_self_Tensor`, self, dtype)
 }
@@ -2977,6 +3021,10 @@ cpp_torch_method_nonzero_self_Tensor <- function(self) {
     .Call(`_torch_cpp_torch_method_nonzero_self_Tensor`, self)
 }
 
+cpp_torch_method_nonzero_static_self_Tensor_size_int64_t <- function(self, size, fill_value) {
+    .Call(`_torch_cpp_torch_method_nonzero_static_self_Tensor_size_int64_t`, self, size, fill_value)
+}
+
 cpp_torch_method_nonzero_numpy_self_Tensor <- function(self) {
     .Call(`_torch_cpp_torch_method_nonzero_numpy_self_Tensor`, self)
 }
@@ -3473,10 +3521,46 @@ cpp_torch_namespace__assert_async_self_Tensor <- function(self) {
     invisible(.Call(`_torch_cpp_torch_namespace__assert_async_self_Tensor`, self))
 }
 
+cpp_torch_namespace__assert_async_self_Tensor_assert_msg_c10string_view <- function(self, assert_msg) {
+    invisible(.Call(`_torch_cpp_torch_namespace__assert_async_self_Tensor_assert_msg_c10string_view`, self, assert_msg))
+}
+
+cpp_torch_namespace__assert_scalar_self_Scalar_assert_msg_c10string_view <- function(self, assert_msg) {
+    invisible(.Call(`_torch_cpp_torch_namespace__assert_scalar_self_Scalar_assert_msg_c10string_view`, self, assert_msg))
+}
+
+cpp_torch_namespace__functional_assert_scalar_self_Scalar_assert_msg_c10string_view_dep_token_Tensor <- function(self, assert_msg, dep_token) {
+    .Call(`_torch_cpp_torch_namespace__functional_assert_scalar_self_Scalar_assert_msg_c10string_view_dep_token_Tensor`, self, assert_msg, dep_token)
+}
+
+cpp_torch_namespace__functional_assert_async_self_Tensor_assert_msg_c10string_view_dep_token_Tensor <- function(self, assert_msg, dep_token) {
+    .Call(`_torch_cpp_torch_namespace__functional_assert_async_self_Tensor_assert_msg_c10string_view_dep_token_Tensor`, self, assert_msg, dep_token)
+}
+
 cpp_torch_namespace__assert_tensor_metadata_a_Tensor <- function(a, size, stride, dtype) {
     invisible(.Call(`_torch_cpp_torch_namespace__assert_tensor_metadata_a_Tensor`, a, size, stride, dtype))
 }
 
+cpp_torch_namespace__print_s_c10string_view <- function(s) {
+    invisible(.Call(`_torch_cpp_torch_namespace__print_s_c10string_view`, s))
+}
+
+cpp_torch_namespace_sym_constrain_range_size_Scalar <- function(size, min, max) {
+    invisible(.Call(`_torch_cpp_torch_namespace_sym_constrain_range_size_Scalar`, size, min, max))
+}
+
+cpp_torch_namespace_sym_constrain_range_for_size_size_Scalar <- function(size, min, max) {
+    invisible(.Call(`_torch_cpp_torch_namespace_sym_constrain_range_for_size_size_Scalar`, size, min, max))
+}
+
+cpp_torch_namespace__functional_sym_constrain_range_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor <- function(size, min, max, dep_token) {
+    .Call(`_torch_cpp_torch_namespace__functional_sym_constrain_range_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor`, size, min, max, dep_token)
+}
+
+cpp_torch_namespace__functional_sym_constrain_range_for_size_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor <- function(size, min, max, dep_token) {
+    .Call(`_torch_cpp_torch_namespace__functional_sym_constrain_range_for_size_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor`, size, min, max, dep_token)
+}
+
 cpp_torch_namespace__use_cudnn_ctc_loss_log_probs_Tensor_targets_Tensor_input_lengths_IntArrayRef_target_lengths_IntArrayRef_blank_int64_t <- function(log_probs, targets, input_lengths, target_lengths, blank) {
     .Call(`_torch_cpp_torch_namespace__use_cudnn_ctc_loss_log_probs_Tensor_targets_Tensor_input_lengths_IntArrayRef_target_lengths_IntArrayRef_blank_int64_t`, log_probs, targets, input_lengths, target_lengths, blank)
 }
@@ -3781,14 +3865,26 @@ cpp_torch_namespace__test_check_tensor_self_Tensor <- function(self) {
     .Call(`_torch_cpp_torch_namespace__test_check_tensor_self_Tensor`, self)
 }
 
+cpp_torch_namespace__test_functorch_fallback_self_Tensor_other_Tensor <- function(self, other) {
+    .Call(`_torch_cpp_torch_namespace__test_functorch_fallback_self_Tensor_other_Tensor`, self, other)
+}
+
 cpp_torch_namespace_all_self_Tensor_dim_int64_t <- function(self, dim, keepdim) {
     .Call(`_torch_cpp_torch_namespace_all_self_Tensor_dim_int64_t`, self, dim, keepdim)
 }
 
+cpp_torch_namespace_all_self_Tensor_dim_IntArrayRef <- function(self, dim, keepdim) {
+    .Call(`_torch_cpp_torch_namespace_all_self_Tensor_dim_IntArrayRef`, self, dim, keepdim)
+}
+
 cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_int64_t <- function(out, self, dim, keepdim) {
     .Call(`_torch_cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_int64_t`, out, self, dim, keepdim)
 }
 
+cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_IntArrayRef <- function(out, self, dim, keepdim) {
+    .Call(`_torch_cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_IntArrayRef`, out, self, dim, keepdim)
+}
+
 cpp_torch_namespace_all_self_Tensor_dim_Dimname <- function(self, dim, keepdim) {
     .Call(`_torch_cpp_torch_namespace_all_self_Tensor_dim_Dimname`, self, dim, keepdim)
 }
@@ -3805,10 +3901,18 @@ cpp_torch_namespace_any_self_Tensor_dim_int64_t <- function(self, dim, keepdim)
     .Call(`_torch_cpp_torch_namespace_any_self_Tensor_dim_int64_t`, self, dim, keepdim)
 }
 
+cpp_torch_namespace_any_self_Tensor_dim_IntArrayRef <- function(self, dim, keepdim) {
+    .Call(`_torch_cpp_torch_namespace_any_self_Tensor_dim_IntArrayRef`, self, dim, keepdim)
+}
+
 cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_int64_t <- function(out, self, dim, keepdim) {
     .Call(`_torch_cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_int64_t`, out, self, dim, keepdim)
 }
 
+cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_IntArrayRef <- function(out, self, dim, keepdim) {
+    .Call(`_torch_cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_IntArrayRef`, out, self, dim, keepdim)
+}
+
 cpp_torch_namespace_any_self_Tensor_dim_Dimname <- function(self, dim, keepdim) {
     .Call(`_torch_cpp_torch_namespace_any_self_Tensor_dim_Dimname`, self, dim, keepdim)
 }
@@ -4101,6 +4205,10 @@ cpp_torch_namespace_copysign_out_out_Tensor_self_Tensor_other_Scalar <- function
     .Call(`_torch_cpp_torch_namespace_copysign_out_out_Tensor_self_Tensor_other_Scalar`, out, self, other)
 }
 
+cpp_torch_namespace__lazy_clone_self_Tensor <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__lazy_clone_self_Tensor`, self)
+}
+
 cpp_torch_namespace_logical_not_self_Tensor <- function(self) {
     .Call(`_torch_cpp_torch_namespace_logical_not_self_Tensor`, self)
 }
@@ -4525,6 +4633,10 @@ cpp_torch_namespace_cudnn_convolution_self_Tensor_weight_Tensor_padding_IntArray
     .Call(`_torch_cpp_torch_namespace_cudnn_convolution_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool`, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32)
 }
 
+cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool <- function(out, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32) {
+    .Call(`_torch_cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool`, out, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32)
+}
+
 cpp_torch_namespace_cudnn_convolution_transpose_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool <- function(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32) {
     .Call(`_torch_cpp_torch_namespace_cudnn_convolution_transpose_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool`, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32)
 }
@@ -4877,6 +4989,10 @@ cpp_torch_namespace_empty_size_IntArrayRef <- function(size, options, memory_for
     .Call(`_torch_cpp_torch_namespace_empty_size_IntArrayRef`, size, options, memory_format)
 }
 
+cpp_torch_namespace_empty_permuted_size_IntArrayRef_physical_layout_IntArrayRef <- function(size, physical_layout, options) {
+    .Call(`_torch_cpp_torch_namespace_empty_permuted_size_IntArrayRef_physical_layout_IntArrayRef`, size, physical_layout, options)
+}
+
 cpp_torch_namespace__empty_affine_quantized_size_IntArrayRef <- function(size, options, scale, zero_point, memory_format) {
     .Call(`_torch_cpp_torch_namespace__empty_affine_quantized_size_IntArrayRef`, size, options, scale, zero_point, memory_format)
 }
@@ -5209,28 +5325,24 @@ cpp_torch_namespace__validate_compressed_sparse_indices_is_crow_bool_compressed_
     invisible(.Call(`_torch_cpp_torch_namespace__validate_compressed_sparse_indices_is_crow_bool_compressed_idx_Tensor_plain_idx_Tensor_cdim_int64_t_dim_int64_t_nnz_int64_t`, is_crow, compressed_idx, plain_idx, cdim, dim, nnz))
 }
 
-cpp_torch_namespace__cufft_get_plan_cache_size_device_index_int64_t <- function(device_index) {
-    .Call(`_torch_cpp_torch_namespace__cufft_get_plan_cache_size_device_index_int64_t`, device_index)
+cpp_torch_namespace_index_self_Tensor_indices_constc10ListstdoptionalTensor <- function(self, indices) {
+    .Call(`_torch_cpp_torch_namespace_index_self_Tensor_indices_constc10ListstdoptionalTensor`, self, indices)
 }
 
-cpp_torch_namespace__cufft_get_plan_cache_max_size_device_index_int64_t <- function(device_index) {
-    .Call(`_torch_cpp_torch_namespace__cufft_get_plan_cache_max_size_device_index_int64_t`, device_index)
+cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor <- function(out, self, indices) {
+    .Call(`_torch_cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor`, out, self, indices)
 }
 
-cpp_torch_namespace__cufft_set_plan_cache_max_size_device_index_int64_t_max_size_int64_t <- function(device_index, max_size) {
-    invisible(.Call(`_torch_cpp_torch_namespace__cufft_set_plan_cache_max_size_device_index_int64_t_max_size_int64_t`, device_index, max_size))
+cpp_torch_namespace__unsafe_index_self_Tensor_indices_constc10ListstdoptionalTensor <- function(self, indices) {
+    .Call(`_torch_cpp_torch_namespace__unsafe_index_self_Tensor_indices_constc10ListstdoptionalTensor`, self, indices)
 }
 
-cpp_torch_namespace__cufft_clear_plan_cache_device_index_int64_t <- function(device_index) {
-    invisible(.Call(`_torch_cpp_torch_namespace__cufft_clear_plan_cache_device_index_int64_t`, device_index))
+cpp_torch_namespace__unsafe_masked_index_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_fill_Scalar <- function(self, mask, indices, fill) {
+    .Call(`_torch_cpp_torch_namespace__unsafe_masked_index_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_fill_Scalar`, self, mask, indices, fill)
 }
 
-cpp_torch_namespace_index_self_Tensor_indices_constc10Listc10optionalTensor <- function(self, indices) {
-    .Call(`_torch_cpp_torch_namespace_index_self_Tensor_indices_constc10Listc10optionalTensor`, self, indices)
-}
-
-cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor <- function(out, self, indices) {
-    .Call(`_torch_cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor`, out, self, indices)
+cpp_torch_namespace__unsafe_masked_index_put_accumulate_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor <- function(self, mask, indices, values) {
+    .Call(`_torch_cpp_torch_namespace__unsafe_masked_index_put_accumulate_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor`, self, mask, indices, values)
 }
 
 cpp_torch_namespace_index_copy_out_out_Tensor_self_Tensor_dim_int64_t_index_Tensor_source_Tensor <- function(out, self, dim, index, source) {
@@ -5245,16 +5357,20 @@ cpp_torch_namespace_index_copy_self_Tensor_dim_Dimname_index_Tensor_source_Tenso
     .Call(`_torch_cpp_torch_namespace_index_copy_self_Tensor_dim_Dimname_index_Tensor_source_Tensor`, self, dim, index, source)
 }
 
-cpp_torch_namespace_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor <- function(self, indices, values, accumulate) {
-    .Call(`_torch_cpp_torch_namespace_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor`, self, indices, values, accumulate)
+cpp_torch_namespace_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor <- function(self, indices, values, accumulate) {
+    .Call(`_torch_cpp_torch_namespace_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor`, self, indices, values, accumulate)
+}
+
+cpp_torch_namespace_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor <- function(self, indices, values, accumulate) {
+    .Call(`_torch_cpp_torch_namespace_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor`, self, indices, values, accumulate)
 }
 
-cpp_torch_namespace_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor <- function(self, indices, values, accumulate) {
-    .Call(`_torch_cpp_torch_namespace_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor`, self, indices, values, accumulate)
+cpp_torch_namespace__unsafe_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor <- function(self, indices, values, accumulate) {
+    .Call(`_torch_cpp_torch_namespace__unsafe_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor`, self, indices, values, accumulate)
 }
 
-cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor <- function(self, indices, values, accumulate, unsafe) {
-    .Call(`_torch_cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor`, self, indices, values, accumulate, unsafe)
+cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor <- function(self, indices, values, accumulate, unsafe) {
+    .Call(`_torch_cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor`, self, indices, values, accumulate, unsafe)
 }
 
 cpp_torch_namespace_instance_norm_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_use_input_stats_bool_momentum_double_eps_double_cudnn_enabled_bool <- function(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled) {
     .Call(`_torch_cpp_torch_namespace_instance_norm_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_use_input_stats_bool_momentum_double_eps_double_cudnn_enabled_bool`, input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled)
 }
@@ -5377,6 +5493,10 @@ cpp_torch_namespace_native_layer_norm_backward_grad_out_Tensor_input_Tensor_norm
     .Call(`_torch_cpp_torch_namespace_native_layer_norm_backward_grad_out_Tensor_input_Tensor_normalized_shape_IntArrayRef_mean_Tensor_rstd_Tensor_weight_Tensor_bias_Tensor_output_mask_stdarraybool3`, grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask)
 }
 
+cpp_torch_namespace_rms_norm_input_Tensor_normalized_shape_IntArrayRef <- function(input, normalized_shape, weight, eps) {
+    .Call(`_torch_cpp_torch_namespace_rms_norm_input_Tensor_normalized_shape_IntArrayRef`, input, normalized_shape, weight, eps)
+}
+
 cpp_torch_namespace_nan_to_num_self_Tensor <- function(self, nan, posinf, neginf) {
     .Call(`_torch_cpp_torch_namespace_nan_to_num_self_Tensor`, self, nan, posinf, neginf)
 }
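The new `rms_norm` binding above corresponds to root-mean-square layer normalization. As a plain-R reference for the computation (an illustrative sketch only, not the package's implementation; the default `eps` here is assumed):

```r
rms_norm_ref <- function(x, weight = 1, eps = 1e-6) {
  # normalize by the root mean square of x, then apply the learned scale
  rms <- sqrt(mean(x^2) + eps)
  (x / rms) * weight
}

rms_norm_ref(c(1, 2, 3))
```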
@@ -5417,6 +5537,46 @@ cpp_torch_namespace_mkldnn_linear_backward_self_Tensor_grad_output_Tensor_weight
     .Call(`_torch_cpp_torch_namespace_mkldnn_linear_backward_self_Tensor_grad_output_Tensor_weight_Tensor_output_mask_stdarraybool3`, self, grad_output, weight, output_mask)
 }
 
+cpp_torch_namespace__cslt_compress_input_Tensor <- function(input) {
+    .Call(`_torch_cpp_torch_namespace__cslt_compress_input_Tensor`, input)
+}
+
+cpp_torch_namespace__cslt_sparse_mm_compressed_A_Tensor_dense_B_Tensor <- function(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result, alg_id) {
+    .Call(`_torch_cpp_torch_namespace__cslt_sparse_mm_compressed_A_Tensor_dense_B_Tensor`, compressed_A, dense_B, bias, alpha, out_dtype, transpose_result, alg_id)
+}
+
+cpp_torch_namespace__cslt_sparse_mm_search_compressed_A_Tensor_dense_B_Tensor <- function(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result) {
+    .Call(`_torch_cpp_torch_namespace__cslt_sparse_mm_search_compressed_A_Tensor_dense_B_Tensor`, compressed_A, dense_B, bias, alpha, out_dtype, transpose_result)
+}
+
+cpp_torch_namespace__sparse_semi_structured_tile_input_Tensor <- function(input, algorithm, use_cutlass) {
+    .Call(`_torch_cpp_torch_namespace__sparse_semi_structured_tile_input_Tensor`, input, algorithm, use_cutlass)
+}
+
+cpp_torch_namespace__sparse_semi_structured_apply_input_Tensor_thread_masks_Tensor <- function(input, thread_masks) {
+    .Call(`_torch_cpp_torch_namespace__sparse_semi_structured_apply_input_Tensor_thread_masks_Tensor`, input, thread_masks)
+}
+
+cpp_torch_namespace__sparse_semi_structured_apply_dense_input_Tensor_thread_masks_Tensor <- function(input, thread_masks) {
+    .Call(`_torch_cpp_torch_namespace__sparse_semi_structured_apply_dense_input_Tensor_thread_masks_Tensor`, input, thread_masks)
+}
+
+cpp_torch_namespace__sparse_semi_structured_linear_input_Tensor_weight_Tensor_meta_Tensor <- function(input, weight, meta, bias, activation, out_dtype) {
+    .Call(`_torch_cpp_torch_namespace__sparse_semi_structured_linear_input_Tensor_weight_Tensor_meta_Tensor`, input, weight, meta, bias, activation, out_dtype)
+}
+
+cpp_torch_namespace__sparse_semi_structured_mm_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor <- function(mat1, mat1_meta, mat2, out_dtype) {
+    .Call(`_torch_cpp_torch_namespace__sparse_semi_structured_mm_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor`, mat1, mat1_meta, mat2, out_dtype)
+}
+
+cpp_torch_namespace__sparse_semi_structured_addmm_input_Tensor_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor <- function(input, mat1, mat1_meta, mat2, alpha, beta, out_dtype) {
+    .Call(`_torch_cpp_torch_namespace__sparse_semi_structured_addmm_input_Tensor_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor`, input, mat1, mat1_meta, mat2, alpha, beta, out_dtype)
+}
+
+cpp_torch_namespace__mixed_dtypes_linear_input_Tensor_weight_Tensor_scale_Tensor <- function(input, weight, scale, bias, activation) {
+    .Call(`_torch_cpp_torch_namespace__mixed_dtypes_linear_input_Tensor_weight_Tensor_scale_Tensor`, input, weight, scale, bias, activation)
+}
+
 cpp_torch_namespace_fbgemm_linear_int8_weight_fp32_activation_input_Tensor_weight_Tensor_packed_Tensor_col_offsets_Tensor_weight_scale_Scalar_weight_zero_point_Scalar_bias_Tensor <- function(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias) {
     .Call(`_torch_cpp_torch_namespace_fbgemm_linear_int8_weight_fp32_activation_input_Tensor_weight_Tensor_packed_Tensor_col_offsets_Tensor_weight_scale_Scalar_weight_zero_point_Scalar_bias_Tensor`, input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias)
 }
@@ -5433,6 +5593,14 @@ cpp_torch_namespace_fbgemm_pack_gemm_matrix_fp16_input_Tensor <- function(input)
     .Call(`_torch_cpp_torch_namespace_fbgemm_pack_gemm_matrix_fp16_input_Tensor`, input)
 }
 
+cpp_torch_namespace__wrapped_linear_prepack_weight_Tensor_weight_scale_Tensor_weight_zero_point_Tensor_bias_Tensor <- function(weight, weight_scale, weight_zero_point, bias) {
+    .Call(`_torch_cpp_torch_namespace__wrapped_linear_prepack_weight_Tensor_weight_scale_Tensor_weight_zero_point_Tensor_bias_Tensor`, weight, weight_scale, weight_zero_point, bias)
+}
+
+cpp_torch_namespace__wrapped_quantized_linear_prepacked_input_Tensor_input_scale_Tensor_input_zero_point_Tensor_packed_weight_Tensor_output_scale_Tensor_output_zero_point_Tensor_out_channel_int64_t <- function(input, input_scale, input_zero_point, packed_weight, output_scale, output_zero_point, out_channel) {
+    .Call(`_torch_cpp_torch_namespace__wrapped_quantized_linear_prepacked_input_Tensor_input_scale_Tensor_input_zero_point_Tensor_packed_weight_Tensor_output_scale_Tensor_output_zero_point_Tensor_out_channel_int64_t`, input, input_scale, input_zero_point, packed_weight, output_scale, output_zero_point, out_channel)
+}
+
 cpp_torch_namespace_fbgemm_linear_fp16_weight_fp32_activation_input_Tensor_packed_weight_Tensor_bias_Tensor <- function(input, packed_weight, bias) {
     .Call(`_torch_cpp_torch_namespace_fbgemm_linear_fp16_weight_fp32_activation_input_Tensor_packed_weight_Tensor_bias_Tensor`, input, packed_weight, bias)
 }
@@ -5465,10 +5633,34 @@ cpp_torch_namespace_linspace_start_Scalar_end_Scalar_steps_int64_t <- function(s
     .Call(`_torch_cpp_torch_namespace_linspace_start_Scalar_end_Scalar_steps_int64_t`, start, end, steps, options)
 }
 
+cpp_torch_namespace_linspace_start_Tensor_end_Tensor_steps_int64_t <- function(start, end, steps, options) {
+    .Call(`_torch_cpp_torch_namespace_linspace_start_Tensor_end_Tensor_steps_int64_t`, start, end, steps, options)
+}
+
+cpp_torch_namespace_linspace_start_Tensor_end_Scalar_steps_int64_t <- function(start, end, steps, options) {
+    .Call(`_torch_cpp_torch_namespace_linspace_start_Tensor_end_Scalar_steps_int64_t`, start, end, steps, options)
+}
+
+cpp_torch_namespace_linspace_start_Scalar_end_Tensor_steps_int64_t <- function(start, end, steps, options) {
+    .Call(`_torch_cpp_torch_namespace_linspace_start_Scalar_end_Tensor_steps_int64_t`, start, end, steps, options)
+}
+
 cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t <- function(out, start, end, steps) {
     .Call(`_torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t`, out, start, end, steps)
 }
 
+cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t <- function(out, start, end, steps) {
+    .Call(`_torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t`, out, start, end, steps)
+}
+
+cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t <- function(out, start, end, steps) {
+    .Call(`_torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t`, out, start, end, steps)
+}
+
+cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t <- function(out, start, end, steps) {
+    .Call(`_torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t`, out, start, end, steps)
+}
+
 cpp_torch_namespace_log_self_Tensor <- function(self) {
     .Call(`_torch_cpp_torch_namespace_log_self_Tensor`, self)
 }
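The overloads just added mirror ATen's tensor-valued `start`/`end` variants of `linspace` (and, in the next hunk, `logspace`). Assuming the R-level `torch_linspace()` dispatches to them, usage would look like this sketch:

```r
library(torch)

# scalar endpoints (existing overload)
torch_linspace(0, 1, steps = 5)

# tensor endpoints (the newly bound overload; R-level dispatch assumed here)
torch_linspace(torch_tensor(0), torch_tensor(1), steps = 5)
```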
cpp_torch_namespace_logspace_start_Scalar_end_Scalar_steps_int64_t <- function(s .Call(`_torch_cpp_torch_namespace_logspace_start_Scalar_end_Scalar_steps_int64_t`, start, end, steps, base, options) } +cpp_torch_namespace_logspace_start_Tensor_end_Tensor_steps_int64_t <- function(start, end, steps, base, options) { + .Call(`_torch_cpp_torch_namespace_logspace_start_Tensor_end_Tensor_steps_int64_t`, start, end, steps, base, options) +} + +cpp_torch_namespace_logspace_start_Tensor_end_Scalar_steps_int64_t <- function(start, end, steps, base, options) { + .Call(`_torch_cpp_torch_namespace_logspace_start_Tensor_end_Scalar_steps_int64_t`, start, end, steps, base, options) +} + +cpp_torch_namespace_logspace_start_Scalar_end_Tensor_steps_int64_t <- function(start, end, steps, base, options) { + .Call(`_torch_cpp_torch_namespace_logspace_start_Scalar_end_Tensor_steps_int64_t`, start, end, steps, base, options) +} + cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t <- function(out, start, end, steps, base) { .Call(`_torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t`, out, start, end, steps, base) } +cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t <- function(out, start, end, steps, base) { + .Call(`_torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t`, out, start, end, steps, base) +} + +cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t <- function(out, start, end, steps, base) { + .Call(`_torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t`, out, start, end, steps, base) +} + +cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t <- function(out, start, end, steps, base) { + .Call(`_torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t`, out, start, end, steps, base) +} + cpp_torch_namespace_log_softmax_self_Tensor_dim_int64_t <- function(self, dim, dtype) { .Call(`_torch_cpp_torch_namespace_log_softmax_self_Tensor_dim_int64_t`, self, dim, dtype) } @@ -5765,6 +5981,10 @@ cpp_torch_namespace_quantized_max_pool2d_self_Tensor_kernel_size_IntArrayRef <- .Call(`_torch_cpp_torch_namespace_quantized_max_pool2d_self_Tensor_kernel_size_IntArrayRef`, self, kernel_size, stride, padding, dilation, ceil_mode) } +cpp_torch_namespace_quantized_max_pool3d_self_Tensor_kernel_size_IntArrayRef <- function(self, kernel_size, stride, padding, dilation, ceil_mode) { + .Call(`_torch_cpp_torch_namespace_quantized_max_pool3d_self_Tensor_kernel_size_IntArrayRef`, self, kernel_size, stride, padding, dilation, ceil_mode) +} + cpp_torch_namespace_max_pool3d_self_Tensor_kernel_size_IntArrayRef <- function(self, kernel_size, stride, padding, dilation, ceil_mode) { .Call(`_torch_cpp_torch_namespace_max_pool3d_self_Tensor_kernel_size_IntArrayRef`, self, kernel_size, stride, padding, dilation, ceil_mode) } @@ -5773,6 +5993,10 @@ cpp_torch_namespace_mean_self_Tensor <- function(self, dtype) { .Call(`_torch_cpp_torch_namespace_mean_self_Tensor`, self, dtype) } +cpp_torch_namespace_mean_out_out_Tensor_self_Tensor <- function(out, self, dtype) { + .Call(`_torch_cpp_torch_namespace_mean_out_out_Tensor_self_Tensor`, out, self, dtype) +} + cpp_torch_namespace_mean_self_Tensor_dim_IntArrayRef <- function(self, dim, keepdim, dtype) { .Call(`_torch_cpp_torch_namespace_mean_self_Tensor_dim_IntArrayRef`, self, dim, keepdim, dtype) } @@ -5925,6 +6149,26 @@ 
cpp_torch_namespace_mm_out_out_Tensor_self_Tensor_mat2_Tensor <- function(out, s .Call(`_torch_cpp_torch_namespace_mm_out_out_Tensor_self_Tensor_mat2_Tensor`, out, self, mat2) } +cpp_torch_namespace__int_mm_self_Tensor_mat2_Tensor <- function(self, mat2) { + .Call(`_torch_cpp_torch_namespace__int_mm_self_Tensor_mat2_Tensor`, self, mat2) +} + +cpp_torch_namespace__int_mm_out_out_Tensor_self_Tensor_mat2_Tensor <- function(out, self, mat2) { + .Call(`_torch_cpp_torch_namespace__int_mm_out_out_Tensor_self_Tensor_mat2_Tensor`, out, self, mat2) +} + +cpp_torch_namespace__convert_weight_to_int4pack_self_Tensor_innerKTiles_int64_t <- function(self, innerKTiles) { + .Call(`_torch_cpp_torch_namespace__convert_weight_to_int4pack_self_Tensor_innerKTiles_int64_t`, self, innerKTiles) +} + +cpp_torch_namespace__weight_int4pack_mm_self_Tensor_mat2_Tensor_qGroupSize_int64_t_qScaleAndZeros_Tensor <- function(self, mat2, qGroupSize, qScaleAndZeros) { + .Call(`_torch_cpp_torch_namespace__weight_int4pack_mm_self_Tensor_mat2_Tensor_qGroupSize_int64_t_qScaleAndZeros_Tensor`, self, mat2, qGroupSize, qScaleAndZeros) +} + +cpp_torch_namespace__weight_int8pack_mm_self_Tensor_mat2_Tensor_scales_Tensor <- function(self, mat2, scales) { + .Call(`_torch_cpp_torch_namespace__weight_int8pack_mm_self_Tensor_mat2_Tensor_scales_Tensor`, self, mat2, scales) +} + cpp_torch_namespace__sparse_mm_sparse_Tensor_dense_Tensor <- function(sparse, dense) { .Call(`_torch_cpp_torch_namespace__sparse_mm_sparse_Tensor_dense_Tensor`, sparse, dense) } @@ -6021,6 +6265,10 @@ cpp_torch_namespace__native_batch_norm_legit_input_Tensor_weight_Tensor_bias_Ten .Call(`_torch_cpp_torch_namespace__native_batch_norm_legit_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double`, input, weight, bias, running_mean, running_var, training, momentum, eps) } +cpp_torch_namespace__native_batch_norm_legit_no_training_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double <- function(input, weight, bias, running_mean, running_var, momentum, eps) { + .Call(`_torch_cpp_torch_namespace__native_batch_norm_legit_no_training_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double`, input, weight, bias, running_mean, running_var, momentum, eps) +} + cpp_torch_namespace__native_batch_norm_legit_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double <- function(out, save_mean, save_invstd, input, weight, bias, running_mean, running_var, training, momentum, eps) { .Call(`_torch_cpp_torch_namespace__native_batch_norm_legit_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double`, out, save_mean, save_invstd, input, weight, bias, running_mean, running_var, training, momentum, eps) } @@ -6061,8 +6309,8 @@ cpp_torch_namespace_batch_norm_backward_reduce_grad_out_Tensor_input_Tensor_mean .Call(`_torch_cpp_torch_namespace_batch_norm_backward_reduce_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_input_g_bool_weight_g_bool_bias_g_bool`, grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g) } -cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor <- 
function(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count) { - .Call(`_torch_cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor`, grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count) +cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor <- function(grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count) { + .Call(`_torch_cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor`, grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count) } cpp_torch_namespace_batch_norm_update_stats_input_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double <- function(input, running_mean, running_var, momentum) { @@ -6657,6 +6905,10 @@ cpp_torch_namespace_slice_backward_grad_output_Tensor_input_sizes_IntArrayRef_di .Call(`_torch_cpp_torch_namespace_slice_backward_grad_output_Tensor_input_sizes_IntArrayRef_dim_int64_t_start_int64_t_end_int64_t_step_int64_t`, grad_output, input_sizes, dim, start, end, step) } +cpp_torch_namespace_slice_inverse_self_Tensor_src_Tensor <- function(self, src, dim, start, end, step) { + .Call(`_torch_cpp_torch_namespace_slice_inverse_self_Tensor_src_Tensor`, self, src, dim, start, end, step) +} + cpp_torch_namespace_slice_scatter_self_Tensor_src_Tensor <- function(self, src, dim, start, end, step) { .Call(`_torch_cpp_torch_namespace_slice_scatter_self_Tensor_src_Tensor`, self, src, dim, start, end, step) } @@ -6773,6 +7025,14 @@ cpp_torch_namespace_sspaddmm_out_out_Tensor_self_Tensor_mat1_Tensor_mat2_Tensor .Call(`_torch_cpp_torch_namespace_sspaddmm_out_out_Tensor_self_Tensor_mat1_Tensor_mat2_Tensor`, out, self, mat1, mat2, beta, alpha) } +cpp_torch_namespace__chunk_cat_tensors_TensorList_dim_int64_t_num_chunks_int64_t <- function(tensors, dim, num_chunks) { + .Call(`_torch_cpp_torch_namespace__chunk_cat_tensors_TensorList_dim_int64_t_num_chunks_int64_t`, tensors, dim, num_chunks) +} + +cpp_torch_namespace__chunk_cat_out_out_Tensor_tensors_TensorList_dim_int64_t_num_chunks_int64_t <- function(out, tensors, dim, num_chunks) { + .Call(`_torch_cpp_torch_namespace__chunk_cat_out_out_Tensor_tensors_TensorList_dim_int64_t_num_chunks_int64_t`, out, tensors, dim, num_chunks) +} + cpp_torch_namespace_stack_tensors_TensorList <- function(tensors, dim) { .Call(`_torch_cpp_torch_namespace_stack_tensors_TensorList`, tensors, dim) } @@ -7073,12 +7333,56 @@ cpp_torch_namespace__nested_from_padded_and_nested_example_padded_Tensor_nt_exam .Call(`_torch_cpp_torch_namespace__nested_from_padded_and_nested_example_padded_Tensor_nt_example_Tensor`, padded, nt_example) } -cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef <- function(self, nested_size, nested_strides, offsets) { - .Call(`_torch_cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef`, self, nested_size, nested_strides, offsets) +cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor <- function(self, nested_size, nested_strides, offsets) { + .Call(`_torch_cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor`, self, 
nested_size, nested_strides, offsets) +} + +cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor <- function(self, nested_size, nested_strides, offsets) { + .Call(`_torch_cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor`, self, nested_size, nested_strides, offsets) +} + +cpp_torch_namespace__nested_view_from_jagged_self_Tensor_offsets_Tensor_dummy_Tensor <- function(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen) { + .Call(`_torch_cpp_torch_namespace__nested_view_from_jagged_self_Tensor_offsets_Tensor_dummy_Tensor`, self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen) +} + +cpp_torch_namespace__nested_view_from_jagged_copy_self_Tensor_offsets_Tensor_dummy_Tensor <- function(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen) { + .Call(`_torch_cpp_torch_namespace__nested_view_from_jagged_copy_self_Tensor_offsets_Tensor_dummy_Tensor`, self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen) +} + +cpp_torch_namespace__nested_get_values_self_Tensor <- function(self) { + .Call(`_torch_cpp_torch_namespace__nested_get_values_self_Tensor`, self) } -cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef <- function(self, nested_size, nested_strides, offsets) { - .Call(`_torch_cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef`, self, nested_size, nested_strides, offsets) +cpp_torch_namespace__nested_get_values_copy_self_Tensor <- function(self) { + .Call(`_torch_cpp_torch_namespace__nested_get_values_copy_self_Tensor`, self) +} + +cpp_torch_namespace__nested_get_offsets_self_Tensor <- function(self) { + .Call(`_torch_cpp_torch_namespace__nested_get_offsets_self_Tensor`, self) +} + +cpp_torch_namespace__nested_get_lengths_self_Tensor <- function(self) { + .Call(`_torch_cpp_torch_namespace__nested_get_lengths_self_Tensor`, self) +} + +cpp_torch_namespace__nested_get_ragged_idx_self_Tensor <- function(self) { + .Call(`_torch_cpp_torch_namespace__nested_get_ragged_idx_self_Tensor`, self) +} + +cpp_torch_namespace__nested_get_min_seqlen_self_Tensor <- function(self) { + .Call(`_torch_cpp_torch_namespace__nested_get_min_seqlen_self_Tensor`, self) +} + +cpp_torch_namespace__nested_get_max_seqlen_self_Tensor <- function(self) { + .Call(`_torch_cpp_torch_namespace__nested_get_max_seqlen_self_Tensor`, self) +} + +cpp_torch_namespace__nested_get_jagged_dummy_any_Tensor <- function(any) { + .Call(`_torch_cpp_torch_namespace__nested_get_jagged_dummy_any_Tensor`, any) +} + +cpp_torch_namespace__nested_compute_contiguous_strides_offsets_nested_size_Tensor <- function(nested_size) { + .Call(`_torch_cpp_torch_namespace__nested_compute_contiguous_strides_offsets_nested_size_Tensor`, nested_size) } cpp_torch_namespace__trilinear_i1_Tensor_i2_Tensor_i3_Tensor_expand1_IntArrayRef_expand2_IntArrayRef_expand3_IntArrayRef_sumdim_IntArrayRef <- function(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim) { @@ -7277,6 +7581,22 @@ cpp_torch_namespace_native_norm_self_Tensor_p_Scalar_dim_IntArrayRef_keepdim_boo .Call(`_torch_cpp_torch_namespace_native_norm_self_Tensor_p_Scalar_dim_IntArrayRef_keepdim_bool_dtype_ScalarType`, self, p, dim, keepdim, dtype) } 
+cpp_torch_namespace__batch_norm_with_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double <- function(input, weight, bias, running_mean, running_var, momentum, eps) { + .Call(`_torch_cpp_torch_namespace__batch_norm_with_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double`, input, weight, bias, running_mean, running_var, momentum, eps) +} + +cpp_torch_namespace__batch_norm_with_update_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_reserve_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double <- function(out, save_mean, save_invstd, reserve, input, weight, bias, running_mean, running_var, momentum, eps) { + .Call(`_torch_cpp_torch_namespace__batch_norm_with_update_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_reserve_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double`, out, save_mean, save_invstd, reserve, input, weight, bias, running_mean, running_var, momentum, eps) +} + +cpp_torch_namespace__batch_norm_no_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double <- function(input, weight, bias, running_mean, running_var, momentum, eps) { + .Call(`_torch_cpp_torch_namespace__batch_norm_no_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double`, input, weight, bias, running_mean, running_var, momentum, eps) +} + +cpp_torch_namespace_batch_norm_backward_grad_out_Tensor_input_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_var_Tensor_update_bool_eps_double_output_mask_stdarraybool3_reserve_Tensor <- function(grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve) { + .Call(`_torch_cpp_torch_namespace_batch_norm_backward_grad_out_Tensor_input_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_var_Tensor_update_bool_eps_double_output_mask_stdarraybool3_reserve_Tensor`, grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve) +} + cpp_torch_namespace__sparse_sum_self_Tensor <- function(self) { .Call(`_torch_cpp_torch_namespace__sparse_sum_self_Tensor`, self) } @@ -7509,6 +7829,18 @@ cpp_torch_namespace__addmm_activation_self_Tensor_mat1_Tensor_mat2_Tensor <- fun .Call(`_torch_cpp_torch_namespace__addmm_activation_self_Tensor_mat1_Tensor_mat2_Tensor`, self, mat1, mat2, beta, alpha, use_gelu) } +cpp_torch_namespace__scaled_mm_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor <- function(self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum) { + .Call(`_torch_cpp_torch_namespace__scaled_mm_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor`, self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum) +} + +cpp_torch_namespace__scaled_mm_out_out_Tensor_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor <- function(out, self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum) { + .Call(`_torch_cpp_torch_namespace__scaled_mm_out_out_Tensor_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor`, out, self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum) +} + 
+cpp_torch_namespace__sparse_compressed_tensor_with_dims_nnz_int64_t_dense_dim_int64_t_size_IntArrayRef_blocksize_IntArrayRef_index_dtype_ScalarType_options_TensorOptions <- function(nnz, dense_dim, size, blocksize, index_dtype, options) { + .Call(`_torch_cpp_torch_namespace__sparse_compressed_tensor_with_dims_nnz_int64_t_dense_dim_int64_t_size_IntArrayRef_blocksize_IntArrayRef_index_dtype_ScalarType_options_TensorOptions`, nnz, dense_dim, size, blocksize, index_dtype, options) +} + cpp_torch_namespace_sparse_compressed_tensor_compressed_indices_Tensor_plain_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions <- function(compressed_indices, plain_indices, values, size, options) { .Call(`_torch_cpp_torch_namespace_sparse_compressed_tensor_compressed_indices_Tensor_plain_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions`, compressed_indices, plain_indices, values, size, options) } @@ -7573,20 +7905,20 @@ cpp_torch_namespace_sparse_coo_tensor_size_IntArrayRef_options_TensorOptions <- .Call(`_torch_cpp_torch_namespace_sparse_coo_tensor_size_IntArrayRef_options_TensorOptions`, size, options) } -cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions <- function(indices, values, options) { - .Call(`_torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions`, indices, values, options) +cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions <- function(indices, values, options, is_coalesced) { + .Call(`_torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions`, indices, values, options, is_coalesced) } -cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions <- function(indices, values, size, options) { - .Call(`_torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions`, indices, values, size, options) +cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions <- function(indices, values, size, options, is_coalesced) { + .Call(`_torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions`, indices, values, size, options, is_coalesced) } -cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef <- function(indices, values, size, options) { - .Call(`_torch_cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef`, indices, values, size, options) +cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef <- function(indices, values, size, options, is_coalesced) { + .Call(`_torch_cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef`, indices, values, size, options, is_coalesced) } -cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef <- function(indices, values, size) { - invisible(.Call(`_torch_cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef`, indices, values, size)) +cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef <- function(indices, values, size, is_coalesced) { + invisible(.Call(`_torch_cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef`, indices, values, size, is_coalesced)) } 
 cpp_torch_namespace__validate_sparse_compressed_tensor_args_compressed_indices_Tensor_plain_indices_Tensor_values_Tensor_size_IntArrayRef_layout_Layout <- function(compressed_indices, plain_indices, values, size, layout) {
@@ -7613,16 +7945,16 @@ cpp_torch_namespace__sparse_coo_tensor_with_dims_sparse_dim_int64_t_dense_dim_in
     .Call(`_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_options_TensorOptions`, sparse_dim, dense_dim, size, options)
 }

-cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions <- function(sparse_dim, dense_dim, size, indices, values, options) {
-    .Call(`_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions`, sparse_dim, dense_dim, size, indices, values, options)
+cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions <- function(sparse_dim, dense_dim, size, indices, values, options, is_coalesced) {
+    .Call(`_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions`, sparse_dim, dense_dim, size, indices, values, options, is_coalesced)
 }

 cpp_torch_namespace__to_cpu_tensors_TensorList <- function(tensors) {
     .Call(`_torch_cpp_torch_namespace__to_cpu_tensors_TensorList`, tensors)
 }

-cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor <- function(grad, input) {
-    .Call(`_torch_cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor`, grad, input)
+cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor <- function(grad, input, masked_grad) {
+    .Call(`_torch_cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor`, grad, input, masked_grad)
 }

 cpp_torch_namespace__coalesce_self_Tensor <- function(self) {
@@ -7649,12 +7981,16 @@ cpp_torch_namespace_unbind_self_Tensor_dim_Dimname <- function(self, dim) {
     .Call(`_torch_cpp_torch_namespace_unbind_self_Tensor_dim_Dimname`, self, dim)
 }

+cpp_torch_namespace__to_sparse_semi_structured_dense_Tensor <- function(dense) {
+    .Call(`_torch_cpp_torch_namespace__to_sparse_semi_structured_dense_Tensor`, dense)
+}
+
 cpp_torch_namespace_mkldnn_reorder_conv2d_weight_self_Tensor <- function(self, padding, stride, dilation, groups, input_size) {
     .Call(`_torch_cpp_torch_namespace_mkldnn_reorder_conv2d_weight_self_Tensor`, self, padding, stride, dilation, groups, input_size)
 }

-cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor <- function(self, padding, stride, dilation, groups) {
-    .Call(`_torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor`, self, padding, stride, dilation, groups)
+cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor <- function(self, padding, stride, dilation, groups, input_size) {
+    .Call(`_torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor`, self, padding, stride, dilation, groups, input_size)
 }

 cpp_torch_namespace_to_mkldnn_backward_grad_Tensor_input_Tensor <- function(grad, input) {
@@ -7825,8 +8161,8 @@ cpp_torch_namespace_result_type_scalar1_Scalar_scalar2_Scalar <- function(scalar
     .Call(`_torch_cpp_torch_namespace_result_type_scalar1_Scalar_scalar2_Scalar`, scalar1, scalar2)
 }

-cpp_torch_namespace_can_cast_from_ScalarType_to_ScalarType <- function(from, to) {
-    .Call(`_torch_cpp_torch_namespace_can_cast_from_ScalarType_to_ScalarType`, from, to)
+cpp_torch_namespace_can_cast_from__ScalarType_to_ScalarType <- function(from_, to) {
+    .Call(`_torch_cpp_torch_namespace_can_cast_from__ScalarType_to_ScalarType`, from_, to)
 }

 cpp_torch_namespace_promote_types_type1_ScalarType_type2_ScalarType <- function(type1, type2) {
@@ -7973,6 +8309,10 @@ cpp_torch_namespace_masked_scatter_self_Tensor_mask_Tensor_source_Tensor <- func
     .Call(`_torch_cpp_torch_namespace_masked_scatter_self_Tensor_mask_Tensor_source_Tensor`, self, mask, source)
 }

+cpp_torch_namespace_masked_scatter_backward_grad_output_Tensor_mask_Tensor_sizes_IntArrayRef <- function(grad_output, mask, sizes) {
+    .Call(`_torch_cpp_torch_namespace_masked_scatter_backward_grad_output_Tensor_mask_Tensor_sizes_IntArrayRef`, grad_output, mask, sizes)
+}
+
 cpp_torch_namespace__masked_softmax_self_Tensor_mask_Tensor <- function(self, mask, dim, mask_type) {
     .Call(`_torch_cpp_torch_namespace__masked_softmax_self_Tensor_mask_Tensor`, self, mask, dim, mask_type)
 }
@@ -8509,6 +8849,14 @@ cpp_torch_namespace_nonzero_self_Tensor <- function(self) {
     .Call(`_torch_cpp_torch_namespace_nonzero_self_Tensor`, self)
 }

+cpp_torch_namespace_nonzero_static_out_out_Tensor_self_Tensor_size_int64_t <- function(out, self, size, fill_value) {
+    .Call(`_torch_cpp_torch_namespace_nonzero_static_out_out_Tensor_self_Tensor_size_int64_t`, out, self, size, fill_value)
+}
+
+cpp_torch_namespace_nonzero_static_self_Tensor_size_int64_t <- function(self, size, fill_value) {
+    .Call(`_torch_cpp_torch_namespace_nonzero_static_self_Tensor_size_int64_t`, self, size, fill_value)
+}
+
 cpp_torch_namespace_nonzero_numpy_self_Tensor <- function(self) {
     .Call(`_torch_cpp_torch_namespace_nonzero_numpy_self_Tensor`, self)
 }
@@ -8901,6 +9249,10 @@ cpp_torch_namespace_min_self_Tensor <- function(self) {
     .Call(`_torch_cpp_torch_namespace_min_self_Tensor`, self)
 }

+cpp_torch_namespace_min_out_out_Tensor_self_Tensor <- function(out, self) {
+    .Call(`_torch_cpp_torch_namespace_min_out_out_Tensor_self_Tensor`, out, self)
+}
+
 cpp_torch_namespace_fmin_self_Tensor_other_Tensor <- function(self, other) {
     .Call(`_torch_cpp_torch_namespace_fmin_self_Tensor_other_Tensor`, self, other)
 }
@@ -9037,6 +9389,10 @@ cpp_torch_namespace_argsort_self_Tensor_stable_bool_dim_int64_t <- function(self
     .Call(`_torch_cpp_torch_namespace_argsort_self_Tensor_stable_bool_dim_int64_t`, self, stable, dim, descending)
 }

+cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool <- function(out, self, stable, dim, descending) {
+    .Call(`_torch_cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool`, out, self, stable, dim, descending)
+}
+
 cpp_torch_namespace_argsort_self_Tensor_dim_Dimname <- function(self, dim, descending) {
     .Call(`_torch_cpp_torch_namespace_argsort_self_Tensor_dim_Dimname`, self, dim, descending)
 }
@@ -9169,156 +9525,156 @@ cpp_torch_namespace__foreach_add__self_TensorList_scalar_Scalar <- function(self
     invisible(.Call(`_torch_cpp_torch_namespace__foreach_add__self_TensorList_scalar_Scalar`, self, scalar))
 }

-cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar <- function(self, scalar) {
-    .Call(`_torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar`, self, scalar)
+cpp_torch_namespace__foreach_add_self_TensorList_other_TensorList <- function(self, other, alpha) {
+    .Call(`_torch_cpp_torch_namespace__foreach_add_self_TensorList_other_TensorList`, self, other, alpha)
 }

-cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar <- function(self, scalar) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar`, self, scalar))
+cpp_torch_namespace__foreach_add__self_TensorList_other_TensorList <- function(self, other, alpha) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_add__self_TensorList_other_TensorList`, self, other, alpha))
 }

-cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar <- function(self, scalar) {
-    .Call(`_torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar`, self, scalar)
+cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
+    .Call(`_torch_cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar`, self, scalars)
 }

-cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar <- function(self, scalar) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar`, self, scalar))
+cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar`, self, scalars))
 }

-cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar <- function(self, scalar) {
-    .Call(`_torch_cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar`, self, scalar)
+cpp_torch_namespace__foreach_add_self_TensorList_other_Tensor <- function(self, other, alpha) {
+    .Call(`_torch_cpp_torch_namespace__foreach_add_self_TensorList_other_Tensor`, self, other, alpha)
 }

-cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar <- function(self, scalar) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar`, self, scalar))
+cpp_torch_namespace__foreach_add__self_TensorList_other_Tensor <- function(self, other, alpha) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_add__self_TensorList_other_Tensor`, self, other, alpha))
 }

-cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar <- function(self, scalar) {
-    .Call(`_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar`, self, scalar)
+cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar <- function(self, scalar) {
+    .Call(`_torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar`, self, scalar)
 }

-cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar <- function(self, scalar) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar`, self, scalar))
+cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar <- function(self, scalar) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar`, self, scalar))
 }

-cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar <- function(self, scalar) {
-    .Call(`_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar`, self, scalar)
+cpp_torch_namespace__foreach_sub_self_TensorList_other_TensorList <- function(self, other, alpha) {
+    .Call(`_torch_cpp_torch_namespace__foreach_sub_self_TensorList_other_TensorList`, self, other, alpha)
 }

-cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar <- function(self, scalar) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar`, self, scalar))
+cpp_torch_namespace__foreach_sub__self_TensorList_other_TensorList <- function(self, other, alpha) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sub__self_TensorList_other_TensorList`, self, other, alpha))
 }

-cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar <- function(self, scalar) {
-    .Call(`_torch_cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar`, self, scalar)
+cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
+    .Call(`_torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar`, self, scalars)
 }

-cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar <- function(self, scalar) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar`, self, scalar))
+cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar`, self, scalars))
 }

-cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar <- function(self, scalar) {
-    .Call(`_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar`, self, scalar)
+cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar <- function(self, scalar) {
+    .Call(`_torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar`, self, scalar)
 }

-cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar <- function(self, scalar) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar`, self, scalar))
+cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar <- function(self, scalar) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar`, self, scalar))
 }

-cpp_torch_namespace__foreach_add_self_TensorList_other_TensorList <- function(self, other, alpha) {
-    .Call(`_torch_cpp_torch_namespace__foreach_add_self_TensorList_other_TensorList`, self, other, alpha)
+cpp_torch_namespace__foreach_mul_self_TensorList_other_TensorList <- function(self, other) {
+    .Call(`_torch_cpp_torch_namespace__foreach_mul_self_TensorList_other_TensorList`, self, other)
 }

-cpp_torch_namespace__foreach_add__self_TensorList_other_TensorList <- function(self, other, alpha) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_add__self_TensorList_other_TensorList`, self, other, alpha))
+cpp_torch_namespace__foreach_mul__self_TensorList_other_TensorList <- function(self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_mul__self_TensorList_other_TensorList`, self, other))
 }

-cpp_torch_namespace__foreach_sub_self_TensorList_other_TensorList <- function(self, other, alpha) {
-    .Call(`_torch_cpp_torch_namespace__foreach_sub_self_TensorList_other_TensorList`, self, other, alpha)
+cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
+    .Call(`_torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar`, self, scalars)
 }

-cpp_torch_namespace__foreach_sub__self_TensorList_other_TensorList <- function(self, other, alpha) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sub__self_TensorList_other_TensorList`, self, other, alpha))
+cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar`, self, scalars))
 }

-cpp_torch_namespace__foreach_mul_self_TensorList_other_TensorList <- function(self, other) {
-    .Call(`_torch_cpp_torch_namespace__foreach_mul_self_TensorList_other_TensorList`, self, other)
+cpp_torch_namespace__foreach_mul_self_TensorList_other_Tensor <- function(self, other) {
+    .Call(`_torch_cpp_torch_namespace__foreach_mul_self_TensorList_other_Tensor`, self, other)
 }

-cpp_torch_namespace__foreach_mul__self_TensorList_other_TensorList <- function(self, other) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_mul__self_TensorList_other_TensorList`, self, other))
+cpp_torch_namespace__foreach_mul__self_TensorList_other_Tensor <- function(self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_mul__self_TensorList_other_Tensor`, self, other))
 }

-cpp_torch_namespace__foreach_div_self_TensorList_other_TensorList <- function(self, other) {
-    .Call(`_torch_cpp_torch_namespace__foreach_div_self_TensorList_other_TensorList`, self, other)
+cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar <- function(self, scalar) {
+    .Call(`_torch_cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar`, self, scalar)
 }

-cpp_torch_namespace__foreach_div__self_TensorList_other_TensorList <- function(self, other) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_div__self_TensorList_other_TensorList`, self, other))
+cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar <- function(self, scalar) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar`, self, scalar))
 }

-cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList <- function(self, other) {
-    .Call(`_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList`, self, other)
+cpp_torch_namespace__foreach_div_self_TensorList_other_TensorList <- function(self, other) {
+    .Call(`_torch_cpp_torch_namespace__foreach_div_self_TensorList_other_TensorList`, self, other)
 }

-cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList <- function(self, other) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList`, self, other))
+cpp_torch_namespace__foreach_div__self_TensorList_other_TensorList <- function(self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_div__self_TensorList_other_TensorList`, self, other))
 }

-cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList <- function(self, other) {
-    .Call(`_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList`, self, other)
+cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
+    .Call(`_torch_cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar`, self, scalars)
 }

-cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList <- function(self, other) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList`, self, other))
+cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar`, self, scalars))
 }

-cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList <- function(self, other) {
-    .Call(`_torch_cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList`, self, other)
+cpp_torch_namespace__foreach_div_self_TensorList_other_Tensor <- function(self, other) {
+    .Call(`_torch_cpp_torch_namespace__foreach_div_self_TensorList_other_Tensor`, self, other)
 }

-cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList <- function(self, other) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList`, self, other))
+cpp_torch_namespace__foreach_div__self_TensorList_other_Tensor <- function(self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_div__self_TensorList_other_Tensor`, self, other))
 }

-cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList <- function(self, other) {
-    .Call(`_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList`, self, other)
+cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar <- function(self, scalar) {
+    .Call(`_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar`, self, scalar)
 }

-cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList <- function(self, other) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList`, self, other))
+cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar <- function(self, scalar) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar`, self, scalar))
 }

-cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
-    .Call(`_torch_cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar`, self, scalars)
+cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList <- function(self, other) {
+    .Call(`_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList`, self, other)
 }

-cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar`, self, scalars))
+cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList <- function(self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList`, self, other))
 }

-cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
-    .Call(`_torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar`, self, scalars)
+cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
+    .Call(`_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar`, self, scalars)
 }

-cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar`, self, scalars))
+cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar`, self, scalars))
 }

-cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
-    .Call(`_torch_cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar`, self, scalars)
+cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar <- function(self, scalar) {
+    .Call(`_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar`, self, scalar)
 }

-cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar`, self, scalars))
+cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar <- function(self, scalar) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar`, self, scalar))
 }

-cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
-    .Call(`_torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar`, self, scalars)
+cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList <- function(self, other) {
+    .Call(`_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList`, self, other)
 }

-cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar`, self, scalars))
+cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList <- function(self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList`, self, other))
 }

 cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
@@ -9329,12 +9685,20 @@ cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar <
     invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar`, self, scalars))
 }

-cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
-    .Call(`_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar`, self, scalars)
+cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar <- function(self, scalar) {
+    .Call(`_torch_cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar`, self, scalar)
 }

-cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar`, self, scalars))
+cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar <- function(self, scalar) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar`, self, scalar))
+}
+
+cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList <- function(self, other) {
+    .Call(`_torch_cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList`, self, other)
+}
+
+cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList <- function(self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList`, self, other))
 }

 cpp_torch_namespace__foreach_maximum_self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
@@ -9345,6 +9709,22 @@ cpp_torch_namespace__foreach_maximum__self_TensorList_scalars_ArrayRefScalar <-
     invisible(.Call(`_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_scalars_ArrayRefScalar`, self, scalars))
 }

+cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar <- function(self, scalar) {
+    .Call(`_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar`, self, scalar)
+}
+
+cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar <- function(self, scalar) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar`, self, scalar))
+}
+
+cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList <- function(self, other) {
+    .Call(`_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList`, self, other)
+}
+
+cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList <- function(self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList`, self, other))
+}
+
 cpp_torch_namespace__foreach_minimum_self_TensorList_scalars_ArrayRefScalar <- function(self, scalars) {
     .Call(`_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_scalars_ArrayRefScalar`, self, scalars)
 }
@@ -9353,24 +9733,52 @@ cpp_torch_namespace__foreach_minimum__self_TensorList_scalars_ArrayRefScalar <-
     invisible(.Call(`_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_scalars_ArrayRefScalar`, self, scalars))
 }

-cpp_torch_namespace__foreach_exp_self_TensorList <- function(self) {
-    .Call(`_torch_cpp_torch_namespace__foreach_exp_self_TensorList`, self)
+cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList <- function(self, tensor1, tensor2, value) {
+    .Call(`_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList`, self, tensor1, tensor2, value)
 }

-cpp_torch_namespace__foreach_zero__self_TensorList <- function(self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_zero__self_TensorList`, self))
+cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar <- function(self, tensor1, tensor2, scalars) {
+    .Call(`_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar`, self, tensor1, tensor2, scalars)
 }

-cpp_torch_namespace__foreach_exp__self_TensorList <- function(self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_exp__self_TensorList`, self))
+cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor <- function(self, tensor1, tensor2, scalars) {
+    .Call(`_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor`, self, tensor1, tensor2, scalars)
 }

-cpp_torch_namespace__foreach_sqrt_self_TensorList <- function(self) {
-    .Call(`_torch_cpp_torch_namespace__foreach_sqrt_self_TensorList`, self)
+cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList <- function(self, tensor1, tensor2, value) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList`, self, tensor1, tensor2, value))
 }

-cpp_torch_namespace__foreach_sqrt__self_TensorList <- function(self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sqrt__self_TensorList`, self))
+cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar <- function(self, tensor1, tensor2, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar`, self, tensor1, tensor2, scalars))
+}
+
+cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor <- function(self, tensor1, tensor2, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor`, self, tensor1, tensor2, scalars))
+}
+
+cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList <- function(self, tensor1, tensor2, value) {
+    .Call(`_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList`, self, tensor1, tensor2, value)
+}
+
+cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar <- function(self, tensor1, tensor2, scalars) {
+    .Call(`_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar`, self, tensor1, tensor2, scalars)
+}
+
+cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor <- function(self, tensor1, tensor2, scalars) {
+    .Call(`_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor`, self, tensor1, tensor2, scalars)
+}
+
+cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList <- function(self, tensor1, tensor2, value) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList`, self, tensor1, tensor2, value))
+}
+
+cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar <- function(self, tensor1, tensor2, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar`, self, tensor1, tensor2, scalars))
+}
+
+cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor <- function(self, tensor1, tensor2, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor`, self, tensor1, tensor2, scalars))
 }

 cpp_torch_namespace__foreach_abs_self_TensorList <- function(self) {
@@ -9445,6 +9853,14 @@ cpp_torch_namespace__foreach_erfc__self_TensorList <- function(self) {
     invisible(.Call(`_torch_cpp_torch_namespace__foreach_erfc__self_TensorList`, self))
 }

+cpp_torch_namespace__foreach_exp_self_TensorList <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__foreach_exp_self_TensorList`, self)
+}
+
+cpp_torch_namespace__foreach_exp__self_TensorList <- function(self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_exp__self_TensorList`, self))
+}
+
 cpp_torch_namespace__foreach_expm1_self_TensorList <- function(self) {
     .Call(`_torch_cpp_torch_namespace__foreach_expm1_self_TensorList`, self)
 }
@@ -9461,6 +9877,38 @@ cpp_torch_namespace__foreach_floor__self_TensorList <- function(self) {
     invisible(.Call(`_torch_cpp_torch_namespace__foreach_floor__self_TensorList`, self))
 }

+cpp_torch_namespace__foreach_frac_self_TensorList <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__foreach_frac_self_TensorList`, self)
+}
+
+cpp_torch_namespace__foreach_frac__self_TensorList <- function(self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_frac__self_TensorList`, self))
+}
+
+cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList <- function(self, tensors1, weights) {
+    .Call(`_torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList`, self, tensors1, weights)
+}
+
+cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList <- function(self, tensors1, weights) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList`, self, tensors1, weights))
+}
+
+cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar <- function(self, tensors1, weight) {
+    .Call(`_torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar`, self, tensors1, weight)
+}
+
+cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar <- function(self, tensors1, weight) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar`, self, tensors1, weight))
+}
+
+cpp_torch_namespace__foreach_lgamma_self_TensorList <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__foreach_lgamma_self_TensorList`, self)
+}
+
+cpp_torch_namespace__foreach_lgamma__self_TensorList <- function(self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_lgamma__self_TensorList`, self))
+}
+
 cpp_torch_namespace__foreach_log_self_TensorList <- function(self) {
     .Call(`_torch_cpp_torch_namespace__foreach_log_self_TensorList`, self)
 }
@@ -9493,6 +9941,10 @@ cpp_torch_namespace__foreach_log2__self_TensorList <- function(self) {
     invisible(.Call(`_torch_cpp_torch_namespace__foreach_log2__self_TensorList`, self))
 }

+cpp_torch_namespace__foreach_max_self_TensorList <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__foreach_max_self_TensorList`, self)
+}
+
 cpp_torch_namespace__foreach_neg_self_TensorList <- function(self) {
     .Call(`_torch_cpp_torch_namespace__foreach_neg_self_TensorList`, self)
 }
@@ -9501,60 +9953,36 @@ cpp_torch_namespace__foreach_neg__self_TensorList <- function(self) {
     invisible(.Call(`_torch_cpp_torch_namespace__foreach_neg__self_TensorList`, self))
 }

-cpp_torch_namespace__foreach_tan_self_TensorList <- function(self) {
-    .Call(`_torch_cpp_torch_namespace__foreach_tan_self_TensorList`, self)
+cpp_torch_namespace__foreach_norm_self_TensorList <- function(self, ord, dtype) {
+    .Call(`_torch_cpp_torch_namespace__foreach_norm_self_TensorList`, self, ord, dtype)
 }

-cpp_torch_namespace__foreach_tan__self_TensorList <- function(self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_tan__self_TensorList`, self))
+cpp_torch_namespace__foreach_pow_self_TensorList_exponent_TensorList <- function(self, exponent) {
+    .Call(`_torch_cpp_torch_namespace__foreach_pow_self_TensorList_exponent_TensorList`, self, exponent)
 }

-cpp_torch_namespace__foreach_tanh_self_TensorList <- function(self) {
-    .Call(`_torch_cpp_torch_namespace__foreach_tanh_self_TensorList`, self)
-}
-
-cpp_torch_namespace__foreach_tanh__self_TensorList <- function(self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_tanh__self_TensorList`, self))
+cpp_torch_namespace__foreach_pow_self_TensorList_exponent_Scalar <- function(self, exponent) {
+    .Call(`_torch_cpp_torch_namespace__foreach_pow_self_TensorList_exponent_Scalar`, self, exponent)
 }

-cpp_torch_namespace__foreach_sin_self_TensorList <- function(self) {
-    .Call(`_torch_cpp_torch_namespace__foreach_sin_self_TensorList`, self)
-}
-
-cpp_torch_namespace__foreach_sin__self_TensorList <- function(self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sin__self_TensorList`, self))
-}
-
-cpp_torch_namespace__foreach_sinh_self_TensorList <- function(self) {
-    .Call(`_torch_cpp_torch_namespace__foreach_sinh_self_TensorList`, self)
-}
-
-cpp_torch_namespace__foreach_sinh__self_TensorList <- function(self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sinh__self_TensorList`, self))
-}
-
-cpp_torch_namespace__foreach_round_self_TensorList <- function(self) {
-    .Call(`_torch_cpp_torch_namespace__foreach_round_self_TensorList`, self)
-}
-
-cpp_torch_namespace__foreach_round__self_TensorList <- function(self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_round__self_TensorList`, self))
+cpp_torch_namespace__foreach_pow_self_TensorList_exponent_ArrayRefScalar <- function(self, exponent) {
+    .Call(`_torch_cpp_torch_namespace__foreach_pow_self_TensorList_exponent_ArrayRefScalar`, self, exponent)
 }

-cpp_torch_namespace__foreach_lgamma_self_TensorList <- function(self) {
-    .Call(`_torch_cpp_torch_namespace__foreach_lgamma_self_TensorList`, self)
+cpp_torch_namespace__foreach_pow_self_Scalar_exponent_TensorList <- function(self, exponent) {
+    .Call(`_torch_cpp_torch_namespace__foreach_pow_self_Scalar_exponent_TensorList`, self, exponent)
 }

-cpp_torch_namespace__foreach_lgamma__self_TensorList <- function(self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_lgamma__self_TensorList`, self))
+cpp_torch_namespace__foreach_pow__self_TensorList_exponent_TensorList <- function(self, exponent) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_pow__self_TensorList_exponent_TensorList`, self, exponent))
 }

-cpp_torch_namespace__foreach_frac_self_TensorList <- function(self) {
-    .Call(`_torch_cpp_torch_namespace__foreach_frac_self_TensorList`, self)
+cpp_torch_namespace__foreach_pow__self_TensorList_exponent_Scalar <- function(self, exponent) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_pow__self_TensorList_exponent_Scalar`, self, exponent))
 }

-cpp_torch_namespace__foreach_frac__self_TensorList <- function(self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_frac__self_TensorList`, self))
+cpp_torch_namespace__foreach_pow__self_TensorList_exponent_ArrayRefScalar <- function(self, exponent) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_pow__self_TensorList_exponent_ArrayRefScalar`, self, exponent))
 }

 cpp_torch_namespace__foreach_reciprocal_self_TensorList <- function(self) {
@@ -9565,88 +9993,88 @@ cpp_torch_namespace__foreach_reciprocal__self_TensorList <- function(self) {
     invisible(.Call(`_torch_cpp_torch_namespace__foreach_reciprocal__self_TensorList`, self))
 }

-cpp_torch_namespace__foreach_sigmoid_self_TensorList <- function(self) {
-    .Call(`_torch_cpp_torch_namespace__foreach_sigmoid_self_TensorList`, self)
+cpp_torch_namespace__foreach_round_self_TensorList <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__foreach_round_self_TensorList`, self)
 }

-cpp_torch_namespace__foreach_sigmoid__self_TensorList <- function(self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sigmoid__self_TensorList`, self))
+cpp_torch_namespace__foreach_round__self_TensorList <- function(self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_round__self_TensorList`, self))
 }

-cpp_torch_namespace__foreach_trunc_self_TensorList <- function(self) {
-    .Call(`_torch_cpp_torch_namespace__foreach_trunc_self_TensorList`, self)
+cpp_torch_namespace__foreach_sigmoid_self_TensorList <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__foreach_sigmoid_self_TensorList`, self)
 }

-cpp_torch_namespace__foreach_trunc__self_TensorList <- function(self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_trunc__self_TensorList`, self))
+cpp_torch_namespace__foreach_sigmoid__self_TensorList <- function(self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sigmoid__self_TensorList`, self))
 }

-cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList <- function(self, tensor1, tensor2, value) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList`, self, tensor1, tensor2, value))
+cpp_torch_namespace__foreach_sign_self_TensorList <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__foreach_sign_self_TensorList`, self)
 }

-cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList <- function(self, tensor1, tensor2, value) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList`, self, tensor1, tensor2, value))
+cpp_torch_namespace__foreach_sign__self_TensorList <- function(self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sign__self_TensorList`, self))
 }

-cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar <- function(self, tensor1, tensor2, scalars) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar`, self, tensor1, tensor2, scalars))
+cpp_torch_namespace__foreach_sin_self_TensorList <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__foreach_sin_self_TensorList`, self)
 }

-cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor <- function(self, tensor1, tensor2, scalars) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor`, self, tensor1, tensor2, scalars))
+cpp_torch_namespace__foreach_sin__self_TensorList <- function(self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sin__self_TensorList`, self))
 }

-cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar <- function(self, tensor1, tensor2, scalars) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar`, self, tensor1, tensor2, scalars))
+cpp_torch_namespace__foreach_sinh_self_TensorList <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__foreach_sinh_self_TensorList`, self)
 }

-cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor <- function(self, tensor1, tensor2, scalars) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor`, self, tensor1, tensor2, scalars))
+cpp_torch_namespace__foreach_sinh__self_TensorList <- function(self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sinh__self_TensorList`, self))
 }

-cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList <- function(self, tensor1, tensor2, value) {
-    .Call(`_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList`, self, tensor1, tensor2, value)
+cpp_torch_namespace__foreach_sqrt_self_TensorList <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__foreach_sqrt_self_TensorList`, self)
 }

-cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList <- function(self, tensor1, tensor2, value) {
-    .Call(`_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList`, self, tensor1, tensor2, value)
+cpp_torch_namespace__foreach_sqrt__self_TensorList <- function(self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sqrt__self_TensorList`, self))
 }

-cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar <- function(self, tensor1, tensor2, scalars) {
-    .Call(`_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar`, self, tensor1, tensor2, scalars)
+cpp_torch_namespace__foreach_tan_self_TensorList <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__foreach_tan_self_TensorList`, self)
 }

-cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor <- function(self, tensor1, tensor2, scalars) {
-    .Call(`_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor`, self, tensor1, tensor2, scalars)
+cpp_torch_namespace__foreach_tan__self_TensorList <- function(self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_tan__self_TensorList`, self))
 }

-cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar <- function(self, tensor1, tensor2, scalars) {
-    .Call(`_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar`, self, tensor1, tensor2, scalars)
+cpp_torch_namespace__foreach_tanh_self_TensorList <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__foreach_tanh_self_TensorList`, self)
 }

-cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor <- function(self, tensor1, tensor2, scalars) {
-    .Call(`_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor`, self, tensor1, tensor2, scalars)
+cpp_torch_namespace__foreach_tanh__self_TensorList <- function(self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_tanh__self_TensorList`, self))
 }

-cpp_torch_namespace__foreach_norm_self_TensorList <- function(self, ord) {
-    .Call(`_torch_cpp_torch_namespace__foreach_norm_self_TensorList`, self, ord)
+cpp_torch_namespace__foreach_trunc_self_TensorList <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__foreach_trunc_self_TensorList`, self)
 }

-cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList <- function(self, tensors1, weights) {
-    .Call(`_torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList`, self, tensors1, weights)
+cpp_torch_namespace__foreach_trunc__self_TensorList <- function(self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_trunc__self_TensorList`, self))
 }

-cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList <- function(self, tensors1, weights) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList`, self, tensors1, weights))
+cpp_torch_namespace__foreach_zero__self_TensorList <- function(self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_zero__self_TensorList`, self))
 }

-cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar <- function(self, tensors1, weight) {
-    .Call(`_torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar`, self, tensors1, weight)
+cpp_torch_namespace__foreach_copy__self_TensorList_src_TensorList <- function(self, src, non_blocking) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_copy__self_TensorList_src_TensorList`, self, src, non_blocking))
 }

-cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar <- function(self, tensors1, weight) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar`, self, tensors1, weight))
+cpp_torch_namespace__foreach_copy_self_TensorList_src_TensorList <- function(self, src, non_blocking) {
+    .Call(`_torch_cpp_torch_namespace__foreach_copy_self_TensorList_src_TensorList`, self, src, non_blocking)
 }

 cpp_torch_namespace_bucketize_self_Tensor_boundaries_Tensor <- function(self, boundaries, out_int32, right) {
@@ -9673,6 +10101,10 @@ cpp_torch_namespace_searchsorted_sorted_sequence_Tensor_self_Scalar <- function(
     .Call(`_torch_cpp_torch_namespace_searchsorted_sorted_sequence_Tensor_self_Scalar`, sorted_sequence, self, out_int32, right, side, sorter)
 }

+cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar <- function(out, sorted_sequence, self, out_int32, right, side, sorter) {
+    .Call(`_torch_cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar`, out, sorted_sequence, self, out_int32, right, side, sorter)
+}
+
 cpp_torch_namespace__convert_indices_from_coo_to_csr_self_Tensor_size_int64_t <- function(self, size, out_int32) {
     .Call(`_torch_cpp_torch_namespace__convert_indices_from_coo_to_csr_self_Tensor_size_int64_t`, self, size, out_int32)
 }
@@ -11361,6 +11793,10 @@ cpp_torch_namespace_linalg_eig_out_eigenvalues_Tensor_eigenvectors_Tensor_self_T
     .Call(`_torch_cpp_torch_namespace_linalg_eig_out_eigenvalues_Tensor_eigenvectors_Tensor_self_Tensor`, eigenvalues, eigenvectors, self)
 }

+cpp_torch_namespace__linalg_eigvals_self_Tensor <- function(self) {
+    .Call(`_torch_cpp_torch_namespace__linalg_eigvals_self_Tensor`, self)
+}
+
 cpp_torch_namespace_linalg_eigvals_self_Tensor <- function(self) {
     .Call(`_torch_cpp_torch_namespace_linalg_eigvals_self_Tensor`, self)
 }
@@ -11581,6 +12017,10 @@ cpp_torch_namespace_linalg_solve_A_Tensor_B_Tensor <- function(A, B, left) {
     .Call(`_torch_cpp_torch_namespace_linalg_solve_A_Tensor_B_Tensor`, A, B, left)
 }

+cpp_torch_namespace__spsolve_A_Tensor_B_Tensor <- function(A, B, left) {
+    .Call(`_torch_cpp_torch_namespace__spsolve_A_Tensor_B_Tensor`, A, B, left)
+}
+
 cpp_torch_namespace_linalg_solve_out_out_Tensor_A_Tensor_B_Tensor <- function(out, A, B, left) {
     .Call(`_torch_cpp_torch_namespace_linalg_solve_out_out_Tensor_A_Tensor_B_Tensor`, out, A, B, left)
 }
@@ -11665,6 +12105,10 @@ cpp_torch_namespace__test_serialization_subcmul_self_Tensor_other_Tensor <- func
     .Call(`_torch_cpp_torch_namespace__test_serialization_subcmul_self_Tensor_other_Tensor`, self, other, alpha)
 }

+cpp_torch_namespace__test_parallel_materialize_self_Tensor_num_parallel_int64_t <- function(self, num_parallel, skip_first) {
+    .Call(`_torch_cpp_torch_namespace__test_parallel_materialize_self_Tensor_num_parallel_int64_t`, self, num_parallel, skip_first)
+}
+
 cpp_torch_namespace__test_optional_intlist_values_Tensor_addends_IntArrayRef <- function(values, addends) {
     .Call(`_torch_cpp_torch_namespace__test_optional_intlist_values_Tensor_addends_IntArrayRef`, values, addends)
 }
@@ -11705,8 +12149,8 @@ cpp_torch_namespace__segment_reduce_backward_grad_Tensor_output_Tensor_data_Tens
     .Call(`_torch_cpp_torch_namespace__segment_reduce_backward_grad_Tensor_output_Tensor_data_Tensor_reduce_c10string_view`, grad, output, data, reduce, lengths, offsets, axis, initial)
 }

-cpp_torch_namespace_pad_sequence_sequences_TensorList <- function(sequences, batch_first, padding_value) {
-    .Call(`_torch_cpp_torch_namespace_pad_sequence_sequences_TensorList`, sequences, batch_first, padding_value)
+cpp_torch_namespace_pad_sequence_sequences_TensorList <- function(sequences, batch_first, padding_value, padding_side) {
+    .Call(`_torch_cpp_torch_namespace_pad_sequence_sequences_TensorList`, sequences, batch_first, padding_value, padding_side)
 }

 cpp_torch_namespace_flatten_dense_tensors_tensors_TensorList <- function(tensors) {
@@ -11877,10 +12321,22 @@ cpp_torch_namespace_alias_copy_self_Tensor <- function(self) {
     .Call(`_torch_cpp_torch_namespace_alias_copy_self_Tensor`, self)
 }

+cpp_torch_namespace__jagged_to_padded_dense_forward_values_Tensor_offsets_TensorList_max_lengths_IntArrayRef <- function(values, offsets, max_lengths, padding_value) {
+    .Call(`_torch_cpp_torch_namespace__jagged_to_padded_dense_forward_values_Tensor_offsets_TensorList_max_lengths_IntArrayRef`, values, offsets, max_lengths, padding_value)
+}
+
+cpp_torch_namespace__padded_dense_to_jagged_forward_dense_Tensor_offsets_TensorList <- function(dense, offsets, total_L) {
+    .Call(`_torch_cpp_torch_namespace__padded_dense_to_jagged_forward_dense_Tensor_offsets_TensorList`, dense, offsets, total_L)
+}
+
 cpp_torch_namespace__nested_tensor_softmax_with_shape_self_Tensor_query_Tensor <- function(self, query) {
     .Call(`_torch_cpp_torch_namespace__nested_tensor_softmax_with_shape_self_Tensor_query_Tensor`, self, query)
 }

+cpp_torch_namespace__safe_softmax_self_Tensor_dim_int64_t <- function(self, dim, dtype) {
+    .Call(`_torch_cpp_torch_namespace__safe_softmax_self_Tensor_dim_int64_t`, self, dim, dtype)
+}
+
 cpp_torch_namespace__transformer_encoder_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor <- function(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type) {
     .Call(`_torch_cpp_torch_namespace__transformer_encoder_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor`, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type)
 }
@@ -11889,62 +12345,86 @@ cpp_torch_namespace__native_multi_head_attention_query_Tensor_key_Tensor_value_T
     .Call(`_torch_cpp_torch_namespace__native_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor`, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type)
 }

-cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor <- function(query, key, value, attn_mask, dropout_p, is_causal) {
-    .Call(`_torch_cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor`, query, key, value, attn_mask, dropout_p, is_causal)
+cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor <- function(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa) {
+    .Call(`_torch_cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor`, query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa)
+}
+
+cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor <- function(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa) {
+    .Call(`_torch_cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor`, query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa)
+}
+
+cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor <- function(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale, enable_gqa) {
+    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor`, query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale, enable_gqa)
+}
+
+cpp_torch_namespace__scaled_dot_product_attention_math_for_mps_query_Tensor_key_Tensor_value_Tensor <- function(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale) {
+    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_attention_math_for_mps_query_Tensor_key_Tensor_value_Tensor`, query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale)
 }

-cpp_torch_namespace__scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor <- function(query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal) {
-    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor`, query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal)
+cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor <- function(query, key, value, dropout_p, is_causal, return_debug_mask, scale) {
+    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor`, query, key, value, dropout_p, is_causal, return_debug_mask, scale)
 }

-cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor <- function(query, key, value, attn_mask, dropout_p, is_causal) {
-    .Call(`_torch_cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor`, query, key, value, attn_mask, dropout_p, is_causal)
+cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_query_Tensor_key_Tensor_value_Tensor <- function(query, key, value, dropout_p, is_causal, attn_mask, scale) {
+    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_query_Tensor_key_Tensor_value_Tensor`, query, key, value, dropout_p, is_causal, attn_mask, scale)
 }

-cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor <- function(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask) {
-    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor`, query, key, value, attn_mask, dropout_p, is_causal, dropout_mask)
+cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_query_Tensor_key_Tensor_value_Tensor <- function(query, key, value, attn_bias, dropout_p, is_causal, return_debug_mask, scale) {
+    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_query_Tensor_key_Tensor_value_Tensor`, query, key, value, attn_bias, dropout_p, is_causal, return_debug_mask, scale)
 }

-cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor <- function(query, key, value, dropout_p, is_causal, return_debug_mask) {
-    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor`, query, key, value, dropout_p, is_causal, return_debug_mask)
+cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor <- function(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale) {
+    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor`, grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale)
 }

-cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t <- function(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset) {
-    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t`, grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset)
+cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_dropout_p_double_is_causal_bool <- function(grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask, scale) {
+    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_dropout_p_double_is_causal_bool`, grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask, scale)
 }

-cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_compute_log_sumexp_bool <- function(query, key, value, compute_log_sumexp, is_causal) {
-    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_compute_log_sumexp_bool`, query, key, value, compute_log_sumexp, is_causal)
+cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_grad_input_mask_stdarraybool4_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor <- function(grad_out, query, key, value, attn_bias, grad_input_mask, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale) {
+    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_grad_input_mask_stdarraybool4_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor`, grad_out, query, key, value, attn_bias, grad_input_mask, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale)
}

-cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor <- function(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs) {
-    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor`, grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs)
+cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool <- function(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, scale) {
+    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool`, query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, scale)
}

-cpp_torch_namespace__chunk_grad_outputs_efficient_attention_query_Tensor_key_Tensor_value_Tensor <- function(query, key, value, is_causal) {
-    .Call(`_torch_cpp_torch_namespace__chunk_grad_outputs_efficient_attention_query_Tensor_key_Tensor_value_Tensor`, query, key, value, is_causal)
+cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_dropout_p_double_grad_input_mask_stdarraybool4 <- function(grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal, scale) {
+    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_dropout_p_double_grad_input_mask_stdarraybool4`, grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal, scale)
}

-cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool <- function(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask) {
-    .Call(`_torch_cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool`, query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask)
+cpp_torch_namespace__scaled_dot_product_cudnn_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool <- function(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, scale) {
+    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_cudnn_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool`, query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, scale)
}

-cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t <- function(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset) {
-    .Call(`_torch_cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t`, grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset)
+cpp_torch_namespace__scaled_dot_product_cudnn_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_attn_bias_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool <- function(grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale) {
+    .Call(`_torch_cpp_torch_namespace__scaled_dot_product_cudnn_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_attn_bias_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool`, grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale)
}

-cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t <- function(query, key, value, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, compute_log_sumexp, causal) {
-    .Call(`_torch_cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t`, query, key, value, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, compute_log_sumexp, causal)
+cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool <- function(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale, window_size_left, window_size_right, seqused_k, alibi_slopes) {
+    .Call(`_torch_cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool`, query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale, window_size_left, window_size_right, seqused_k, alibi_slopes)
}

-cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor <- function(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs) {
-    .Call(`_torch_cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor`, grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs)
+cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor <- function(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale, window_size_left, window_size_right) {
+    .Call(`_torch_cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor`, grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale, window_size_left, window_size_right)
+}
+
+cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_dropout_p_double_custom_mask_type_int64_t <- function(query, key, value, bias, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, custom_mask_type, compute_log_sumexp, scale, seqlen_k, window_size) {
+    .Call(`_torch_cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_dropout_p_double_custom_mask_type_int64_t`, query, key, value, bias, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, custom_mask_type, compute_log_sumexp, scale, seqlen_k, window_size)
+}
+
+cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_out_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_logsumexp_Tensor_dropout_p_double_philox_seed_Tensor_philox_offset_Tensor_custom_mask_type_int64_t_bias_requires_grad_bool <- function(grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key, window_size, shared_storage_dqdkdv) {
+    .Call(`_torch_cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_out_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_logsumexp_Tensor_dropout_p_double_philox_seed_Tensor_philox_offset_Tensor_custom_mask_type_int64_t_bias_requires_grad_bool`, grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key, window_size, shared_storage_dqdkdv)
}

cpp_torch_namespace__triton_scaled_dot_attention_q_Tensor_k_Tensor_v_Tensor <- function(q, k, v, dropout_p) {
    .Call(`_torch_cpp_torch_namespace__triton_scaled_dot_attention_q_Tensor_k_Tensor_v_Tensor`, q, k, v, dropout_p)
}

+cpp_torch_namespace__fill_mem_eff_dropout_mask__self_Tensor_dropout_p_double_seed_int64_t_offset_int64_t <- function(self, dropout_p, seed, offset) {
+    .Call(`_torch_cpp_torch_namespace__fill_mem_eff_dropout_mask__self_Tensor_dropout_p_double_seed_int64_t_offset_int64_t`, self, dropout_p, seed, offset)
+}
+
cpp_torch_namespace__triton_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor <- function(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask) {
    .Call(`_torch_cpp_torch_namespace__triton_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor`, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask)
}
@@ -11957,14 +12437,6 @@ cpp_torch_namespace_special_airy_ai_out_out_Tensor_x_Tensor <- function(out, x)
    .Call(`_torch_cpp_torch_namespace_special_airy_ai_out_out_Tensor_x_Tensor`, out, x)
}

-cpp_torch_namespace__transformer_decoder_only_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor <- function(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value) {
-    .Call(`_torch_cpp_torch_namespace__transformer_decoder_only_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor`, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value)
-}
-
-cpp_torch_namespace__native_decoder_only_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor <- function(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights) {
-    .Call(`_torch_cpp_torch_namespace__native_decoder_only_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor`, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights)
-}
-
cpp_torch_namespace_special_bessel_j0_self_Tensor <- function(self) {
    .Call(`_torch_cpp_torch_namespace_special_bessel_j0_self_Tensor`, self)
}
@@ -12337,20 +12809,44 @@ cpp_torch_namespace_special_spherical_bessel_j0_x_Tensor <- function(x) {
    .Call(`_torch_cpp_torch_namespace_special_spherical_bessel_j0_x_Tensor`, x)
}

-cpp_torch_namespace_special_spherical_bessel_j0_out_out_Tensor_x_Tensor <- function(out, x) {
-    .Call(`_torch_cpp_torch_namespace_special_spherical_bessel_j0_out_out_Tensor_x_Tensor`, out, x)
+cpp_torch_namespace_special_spherical_bessel_j0_out_out_Tensor_x_Tensor <- function(out, x) {
+    .Call(`_torch_cpp_torch_namespace_special_spherical_bessel_j0_out_out_Tensor_x_Tensor`, out, x)
+}
+
+cpp_torch_namespace__foobar_self_Tensor <- function(self, arg1, arg2, arg3) {
+    .Call(`_torch_cpp_torch_namespace__foobar_self_Tensor`, self, arg1, arg2, arg3)
+}
+
+cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool <- function(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf) {
+    invisible(.Call(`_torch_cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool`, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf))
+}
+
+cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool <- function(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf) {
+    invisible(.Call(`_torch_cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool`, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf))
+}
+
+cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool <- function(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf) {
+    invisible(.Call(`_torch_cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool`, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf))
+}
+
+cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool <- function(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf) {
+    invisible(.Call(`_torch_cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool`, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf))
+}
+
+cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool <- function(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf) {
+    invisible(.Call(`_torch_cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool`, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf))
}

-cpp_torch_namespace__foobar_self_Tensor <- function(self, arg1, arg2, arg3) {
-    .Call(`_torch_cpp_torch_namespace__foobar_self_Tensor`, self, arg1, arg2, arg3)
+cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool <- function(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf) {
+    invisible(.Call(`_torch_cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool`, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf))
}

-cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool <- function(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf) {
-    invisible(.Call(`_torch_cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool`, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf))
+cpp_torch_namespace__fused_adagrad__self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool <- function(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf) {
+    invisible(.Call(`_torch_cpp_torch_namespace__fused_adagrad__self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool`, self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf))
}

-cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool <- function(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf) {
-    invisible(.Call(`_torch_cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool`, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf))
+cpp_torch_namespace__propagate_xla_data_input_Tensor_output_Tensor <- function(input, output) {
+    invisible(.Call(`_torch_cpp_torch_namespace__propagate_xla_data_input_Tensor_output_Tensor`, input, output))
}

cpp_torch_namespace__new_zeros_with_same_feature_meta_out_out_Tensor_self_Tensor_other_Tensor <- function(out, self, other, self_num_batch_dims) {
@@ -12409,6 +12905,10 @@ cpp_torch_namespace_affine_grid_generator_out_out_Tensor_theta_Tensor_size_IntAr
    .Call(`_torch_cpp_torch_namespace_affine_grid_generator_out_out_Tensor_theta_Tensor_size_IntArrayRef_align_corners_bool`, out, theta, size, align_corners)
}

+cpp_torch_namespace__test_functorch_fallback_out_out_Tensor_self_Tensor_other_Tensor <- function(out, self, other) {
+    .Call(`_torch_cpp_torch_namespace__test_functorch_fallback_out_out_Tensor_self_Tensor_other_Tensor`, out, self, other)
+}
+
cpp_torch_namespace_bartlett_window_out_out_Tensor_window_length_int64_t <- function(out, window_length) {
    .Call(`_torch_cpp_torch_namespace_bartlett_window_out_out_Tensor_window_length_int64_t`, out, window_length)
}
@@ -12517,10 +13017,6 @@ cpp_torch_namespace_cudnn_batch_norm_backward_out_out0_Tensor_out1_Tensor_out2_T
    .Call(`_torch_cpp_torch_namespace_cudnn_batch_norm_backward_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_grad_output_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_var_Tensor_epsilon_double_reserveSpace_Tensor`, out0, out1, out2, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace)
}

-cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool <- function(out, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32) {
-    .Call(`_torch_cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool`, out, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32)
-}
-
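# --- Editor's aside (illustrative sketch, not part of the patch) ------------
# The `_fused_adam_` / `_fused_adamw_` wrappers above take the optimizer state
# as tensor lists (self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs,
# state_steps), accept `lr` either as a double or as a Tensor, and expose
# `grad_scale` / `found_inf` for AMP loss scaling. Below is a minimal plain-R
# sketch of the per-parameter Adam update such a fused kernel batches over the
# whole list in one launch; every name here is hypothetical.
adam_step <- function(param, grad, exp_avg, exp_avg_sq, step,
                      lr = 1e-3, beta1 = 0.9, beta2 = 0.999,
                      weight_decay = 0, eps = 1e-8, maximize = FALSE) {
  if (maximize) grad <- -grad
  # Adam folds weight decay into the gradient; AdamW would decay `param` directly.
  if (weight_decay != 0) grad <- grad + weight_decay * param
  exp_avg    <- beta1 * exp_avg    + (1 - beta1) * grad    # first moment
  exp_avg_sq <- beta2 * exp_avg_sq + (1 - beta2) * grad^2  # second moment
  hat1 <- exp_avg    / (1 - beta1^step)                    # bias-corrected moments
  hat2 <- exp_avg_sq / (1 - beta2^step)
  param <- param - lr * hat1 / (sqrt(hat2) + eps)
  list(param = param, exp_avg = exp_avg, exp_avg_sq = exp_avg_sq)
}
# ----------------------------------------------------------------------------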
cpp_torch_namespace_cudnn_convolution_transpose_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool <- function(out, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32) {
    .Call(`_torch_cpp_torch_namespace_cudnn_convolution_transpose_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool`, out, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32)
}
@@ -12613,6 +13109,10 @@ cpp_torch_namespace_empty_out_out_Tensor_size_IntArrayRef_names_DimnameList <- f
    .Call(`_torch_cpp_torch_namespace_empty_out_out_Tensor_size_IntArrayRef_names_DimnameList`, out, size, names, memory_format)
}

+cpp_torch_namespace_empty_permuted_out_out_Tensor_size_IntArrayRef_physical_layout_IntArrayRef <- function(out, size, physical_layout) {
+    .Call(`_torch_cpp_torch_namespace_empty_permuted_out_out_Tensor_size_IntArrayRef_physical_layout_IntArrayRef`, out, size, physical_layout)
+}
+
cpp_torch_namespace_new_empty_out_out_Tensor_self_Tensor_size_IntArrayRef <- function(out, self, size) {
    .Call(`_torch_cpp_torch_namespace_new_empty_out_out_Tensor_self_Tensor_size_IntArrayRef`, out, self, size)
}
@@ -12677,6 +13177,10 @@ cpp_torch_namespace_fill_out_out_Tensor_self_Tensor_value_Tensor <- function(out
    .Call(`_torch_cpp_torch_namespace_fill_out_out_Tensor_self_Tensor_value_Tensor`, out, self, value)
}

+cpp_torch_namespace_floor_divide_out_out_Tensor_self_Tensor_other_Scalar <- function(out, self, other) {
+    .Call(`_torch_cpp_torch_namespace_floor_divide_out_out_Tensor_self_Tensor_other_Scalar`, out, self, other)
+}
+
cpp_torch_namespace_full_out_out_Tensor_size_IntArrayRef_fill_value_Scalar_names_DimnameList <- function(out, size, fill_value, names) {
    .Call(`_torch_cpp_torch_namespace_full_out_out_Tensor_size_IntArrayRef_fill_value_Scalar_names_DimnameList`, out, size, fill_value, names)
}
@@ -12753,16 +13257,16 @@ cpp_torch_namespace_native_group_norm_backward_out_out0_Tensor_out1_Tensor_out2_
    .Call(`_torch_cpp_torch_namespace_native_group_norm_backward_out_out0_Tensor_out1_Tensor_out2_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_rstd_Tensor_weight_Tensor_FALSE_int64_t_C_int64_t_HxW_int64_t_group_int64_t_output_mask_stdarraybool3`, out0, out1, out2, grad_out, input, mean, rstd, weight, False, C, HxW, group, output_mask)
}

-cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor <- function(out, self, indices, values, accumulate) {
-    .Call(`_torch_cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor`, out, self, indices, values, accumulate)
+cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor <- function(out, self, indices, values, accumulate) {
+    .Call(`_torch_cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor`, out, self, indices, values, accumulate)
}

-cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor <- function(out, self, indices, values, accumulate, unsafe) {
-    .Call(`_torch_cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor`, out, self, indices, values, accumulate, unsafe)
+cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor <- function(out, self, indices, values, accumulate, unsafe) {
+    .Call(`_torch_cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor`, out, self, indices, values, accumulate, unsafe)
}

-cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor <- function(self, indices, values, accumulate, unsafe) {
-    .Call(`_torch_cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor`, self, indices, values, accumulate, unsafe)
+cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor <- function(self, indices, values, accumulate, unsafe) {
+    .Call(`_torch_cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor`, self, indices, values, accumulate, unsafe)
}

cpp_torch_namespace_isnan_out_out_Tensor_self_Tensor <- function(out, self) {
@@ -12837,6 +13341,10 @@ cpp_torch_namespace_quantized_max_pool2d_out_out_Tensor_self_Tensor_kernel_size_
    .Call(`_torch_cpp_torch_namespace_quantized_max_pool2d_out_out_Tensor_self_Tensor_kernel_size_IntArrayRef`, out, self, kernel_size, stride, padding, dilation, ceil_mode)
}

+cpp_torch_namespace_quantized_max_pool3d_out_out_Tensor_self_Tensor_kernel_size_IntArrayRef <- function(out, self, kernel_size, stride, padding, dilation, ceil_mode) {
+    .Call(`_torch_cpp_torch_namespace_quantized_max_pool3d_out_out_Tensor_self_Tensor_kernel_size_IntArrayRef`, out, self, kernel_size, stride, padding, dilation, ceil_mode)
+}
+
cpp_torch_namespace_median_out_out_Tensor_self_Tensor <- function(out, self) {
    .Call(`_torch_cpp_torch_namespace_median_out_out_Tensor_self_Tensor`, out, self)
}
@@ -12905,6 +13413,10 @@ cpp_torch_namespace__native_batch_norm_legit_functional_input_Tensor_weight_Tens
    .Call(`_torch_cpp_torch_namespace__native_batch_norm_legit_functional_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double`, input, weight, bias, running_mean, running_var, training, momentum, eps)
}

+cpp_torch_namespace__native_batch_norm_legit_no_training_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double <- function(out0, out1, out2, input, weight, bias, running_mean, running_var, momentum, eps) {
+    .Call(`_torch_cpp_torch_namespace__native_batch_norm_legit_no_training_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double`, out0, out1, out2, input, weight, bias, running_mean, running_var, momentum, eps)
+}
+
cpp_torch_namespace_batch_norm_stats_out_out0_Tensor_out1_Tensor_input_Tensor_eps_double <- function(out0, out1, input, eps) {
    .Call(`_torch_cpp_torch_namespace_batch_norm_stats_out_out0_Tensor_out1_Tensor_input_Tensor_eps_double`, out0, out1, input, eps)
}
@@ -12925,8 +13437,8 @@
    .Call(`_torch_cpp_torch_namespace_batch_norm_backward_reduce_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_input_g_bool_weight_g_bool_bias_g_bool`, out0, out1, out2, out3, grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g)
}

-cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor <- function(out, grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count) {
-    .Call(`_torch_cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor`, out, grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count)
+cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor <- function(out, grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count) {
+    .Call(`_torch_cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor`, out, grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count)
}

cpp_torch_namespace_batch_norm_update_stats_out_out0_Tensor_out1_Tensor_input_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double <- function(out0, out1, input, running_mean, running_var, momentum) {
@@ -13113,12 +13625,24 @@ cpp_torch_namespace__nested_tensor_strides_out_out_Tensor_self_Tensor <- functio
    .Call(`_torch_cpp_torch_namespace__nested_tensor_strides_out_out_Tensor_self_Tensor`, out, self)
}

+cpp_torch_namespace__nested_tensor_storage_offsets_out_out_Tensor_self_Tensor <- function(out, self) {
+    .Call(`_torch_cpp_torch_namespace__nested_tensor_storage_offsets_out_out_Tensor_self_Tensor`, out, self)
+}
+
cpp_torch_namespace__nested_from_padded_and_nested_example_out_out_Tensor_padded_Tensor_nt_example_Tensor <- function(out, padded, nt_example) {
    .Call(`_torch_cpp_torch_namespace__nested_from_padded_and_nested_example_out_out_Tensor_padded_Tensor_nt_example_Tensor`, out, padded, nt_example)
}

-cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef <- function(out, self, nested_size, nested_strides, offsets) {
-    .Call(`_torch_cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef`, out, self, nested_size, nested_strides, offsets)
+cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor <- function(out, self, nested_size, nested_strides, offsets) {
+    .Call(`_torch_cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor`, out, self, nested_size, nested_strides, offsets)
+}
+
+cpp_torch_namespace__nested_view_from_jagged_copy_out_out_Tensor_self_Tensor_offsets_Tensor_dummy_Tensor <- function(out, self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen) {
+    .Call(`_torch_cpp_torch_namespace__nested_view_from_jagged_copy_out_out_Tensor_self_Tensor_offsets_Tensor_dummy_Tensor`, out, self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen)
+}
+
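# --- Editor's aside (illustrative sketch, not part of the patch) ------------
# `_nested_view_from_jagged_copy_out` follows the jagged nested-tensor layout:
# a batch of variable-length sequences is stored as one packed values buffer
# plus an `offsets` tensor marking sequence boundaries; `lengths`,
# `ragged_idx`, `min_seqlen` and `max_seqlen` are the optional metadata in the
# signature. A small base-R sketch of the offsets convention (all variable
# names here are hypothetical):
lengths <- c(3L, 1L, 2L)
offsets <- c(0L, cumsum(lengths))     # 0 3 4 6: sequence i spans values[(offsets[i] + 1):offsets[i + 1]]
values  <- c(10, 11, 12, 20, 30, 31)  # all rows packed along the ragged dimension
seqs <- lapply(seq_along(lengths), function(i) values[(offsets[i] + 1):offsets[i + 1]])
str(seqs)                             # recovers the three variable-length sequences
# ----------------------------------------------------------------------------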
+cpp_torch_namespace__nested_get_values_copy_out_out_Tensor_self_Tensor <- function(out, self) {
+    .Call(`_torch_cpp_torch_namespace__nested_get_values_copy_out_out_Tensor_self_Tensor`, out, self)
}

cpp_torch_namespace__trilinear_out_out_Tensor_i1_Tensor_i2_Tensor_i3_Tensor_expand1_IntArrayRef_expand2_IntArrayRef_expand3_IntArrayRef_sumdim_IntArrayRef <- function(out, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim) {
@@ -13201,6 +13725,14 @@ cpp_torch_namespace_native_norm_out_out_Tensor_self_Tensor_p_Scalar_dim_IntArray
    .Call(`_torch_cpp_torch_namespace_native_norm_out_out_Tensor_self_Tensor_p_Scalar_dim_IntArrayRef_keepdim_bool_dtype_ScalarType`, out, self, p, dim, keepdim, dtype)
}

+cpp_torch_namespace__batch_norm_with_update_functional_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double <- function(input, weight, bias, running_mean, running_var, momentum, eps) {
+    .Call(`_torch_cpp_torch_namespace__batch_norm_with_update_functional_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double`, input, weight, bias, running_mean, running_var, momentum, eps)
+}
+
+cpp_torch_namespace__batch_norm_no_update_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double <- function(out0, out1, out2, out3, input, weight, bias, running_mean, running_var, momentum, eps) {
+    .Call(`_torch_cpp_torch_namespace__batch_norm_no_update_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double`, out0, out1, out2, out3, input, weight, bias, running_mean, running_var, momentum, eps)
+}
+
cpp_torch_namespace__sparse_sum_out_out_Tensor_self_Tensor_dim_IntArrayRef <- function(out, self, dim) {
    .Call(`_torch_cpp_torch_namespace__sparse_sum_out_out_Tensor_self_Tensor_dim_IntArrayRef`, out, self, dim)
}
@@ -13297,8 +13829,8 @@ cpp_torch_namespace__sparse_coo_tensor_with_dims_out_out_Tensor_sparse_dim_int64
    .Call(`_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef`, out, sparse_dim, dense_dim, size)
}

-cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor <- function(out, sparse_dim, dense_dim, size, indices, values) {
-    .Call(`_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor`, out, sparse_dim, dense_dim, size, indices, values)
+cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor <- function(out, sparse_dim, dense_dim, size, indices, values, is_coalesced) {
+    .Call(`_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor`, out, sparse_dim, dense_dim, size, indices, values, is_coalesced)
}

cpp_torch_namespace_sparse_resize_out_out_Tensor_self_Tensor_size_IntArrayRef_sparse_dim_int64_t_dense_dim_int64_t <- function(out, self, size, sparse_dim, dense_dim) {
@@ -13321,8 +13853,12 @@ cpp_torch_namespace_sparse_mask_out_out_Tensor_self_Tensor_mask_Tensor <- functi
    .Call(`_torch_cpp_torch_namespace_sparse_mask_out_out_Tensor_self_Tensor_mask_Tensor`, out, self, mask)
}

-cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor <- function(out, self, dtype) {
-    .Call(`_torch_cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor`, out, self, dtype)
+cpp_torch_namespace__sparse_mask_projection_out_out_Tensor_self_Tensor_mask_Tensor <- function(out, self, mask, accumulate_matches) {
+    .Call(`_torch_cpp_torch_namespace__sparse_mask_projection_out_out_Tensor_self_Tensor_mask_Tensor`, out, self, mask, accumulate_matches)
+}
+
+cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor <- function(out, self, dtype, masked_grad) {
+    .Call(`_torch_cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor`, out, self, dtype, masked_grad)
}

cpp_torch_namespace__coalesce_out_out_Tensor_self_Tensor <- function(out, self) {
@@ -13345,28 +13881,28 @@ cpp_torch_namespace_copy_sparse_to_sparse_self_Tensor_src_Tensor <- function(sel
    .Call(`_torch_cpp_torch_namespace_copy_sparse_to_sparse_self_Tensor_src_Tensor`, self, src, non_blocking)
}

-cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t <- function(out, self, sparse_dim) {
-    .Call(`_torch_cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t`, out, self, sparse_dim)
+cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t <- function(out, self, sparse_dim) {
+    .Call(`_torch_cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t`, out, self, sparse_dim)
}

-cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor <- function(out, self, layout, blocksize, dense_dim) {
-    .Call(`_torch_cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor`, out, self, layout, blocksize, dense_dim)
+cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor <- function(out, self, layout, blocksize, dense_dim) {
+    .Call(`_torch_cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor`, out, self, layout, blocksize, dense_dim)
}

-cpp_torch_namespace_to_sparse_csr_out_out_Tensor_self_Tensor <- function(out, self, dense_dim) {
-    .Call(`_torch_cpp_torch_namespace_to_sparse_csr_out_out_Tensor_self_Tensor`, out, self, dense_dim)
+cpp_torch_namespace__to_sparse_csr_out_out_Tensor_self_Tensor <- function(out, self, dense_dim) {
+    .Call(`_torch_cpp_torch_namespace__to_sparse_csr_out_out_Tensor_self_Tensor`, out, self, dense_dim)
}

-cpp_torch_namespace_to_sparse_csc_out_out_Tensor_self_Tensor <- function(out, self, dense_dim) {
-    .Call(`_torch_cpp_torch_namespace_to_sparse_csc_out_out_Tensor_self_Tensor`, out, self, dense_dim)
+cpp_torch_namespace__to_sparse_csc_out_out_Tensor_self_Tensor <- function(out, self, dense_dim) {
+    .Call(`_torch_cpp_torch_namespace__to_sparse_csc_out_out_Tensor_self_Tensor`, out, self, dense_dim)
}

-cpp_torch_namespace_to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef <- function(out, self, blocksize, dense_dim) {
-    .Call(`_torch_cpp_torch_namespace_to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef`, out, self, blocksize, dense_dim)
+cpp_torch_namespace__to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef <- function(out, self, blocksize, dense_dim) {
+    .Call(`_torch_cpp_torch_namespace__to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef`, out, self, blocksize, dense_dim)
}

-cpp_torch_namespace_to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef <- function(out, self, blocksize, dense_dim) {
-    .Call(`_torch_cpp_torch_namespace_to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef`, out, self, blocksize, dense_dim)
+cpp_torch_namespace__to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef <- function(out, self, blocksize, dense_dim) {
+    .Call(`_torch_cpp_torch_namespace__to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef`, out, self, blocksize, dense_dim)
}

cpp_torch_namespace_to_mkldnn_out_out_Tensor_self_Tensor <- function(out, self, dtype) {
@@ -13377,8 +13913,8 @@ cpp_torch_namespace_mkldnn_reorder_conv2d_weight_out_out_Tensor_self_Tensor <- f
    .Call(`_torch_cpp_torch_namespace_mkldnn_reorder_conv2d_weight_out_out_Tensor_self_Tensor`, out, self, padding, stride, dilation, groups, input_size)
}

-cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor <- function(out, self, padding, stride, dilation, groups) {
-    .Call(`_torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor`, out, self, padding, stride, dilation, groups)
+cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor <- function(out, self, padding, stride, dilation, groups, input_size) {
+    .Call(`_torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor`, out, self, padding, stride, dilation, groups, input_size)
}

cpp_torch_namespace_quantize_per_tensor_dynamic_out_out_Tensor_self_Tensor_dtype_ScalarType_reduce_range_bool <- function(out, self, dtype, reduce_range) {
@@ -13697,10 +14233,6 @@ cpp_torch_namespace_remainder_out_out_Tensor_self_Scalar_other_Tensor <- functio
    .Call(`_torch_cpp_torch_namespace_remainder_out_out_Tensor_self_Scalar_other_Tensor`, out, self, other)
}

-cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool <- function(out, self, stable, dim, descending) {
-    .Call(`_torch_cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool`, out, self, stable, dim, descending)
-}
-
cpp_torch_namespace_unfold_backward_out_out_Tensor_grad_in_Tensor_input_sizes_IntArrayRef_dim_int64_t_size_int64_t_step_int64_t <- function(out, grad_in, input_sizes, dim, size, step) {
    .Call(`_torch_cpp_torch_namespace_unfold_backward_out_out_Tensor_grad_in_Tensor_input_sizes_IntArrayRef_dim_int64_t_size_int64_t_step_int64_t`, out, grad_in, input_sizes, dim, size, step)
}
@@ -13729,112 +14261,132 @@ cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalar_Scala
    invisible(.Call(`_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
}

-cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar <- function(out, self, scalar) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
+cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other, alpha) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other, alpha))
}

-cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar <- function(out, self, scalar) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
+cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar <- function(out, self, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar`, out, self, scalars))
}

-cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar <- function(out, self, scalar) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
+cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_Tensor <- function(out, self, other, alpha) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_Tensor`, out, self, other, alpha))
}

-cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar <- function(out, self, scalar) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
+cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar <- function(out, self, scalar) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
}

-cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar <- function(out, self, scalar) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
+cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other, alpha) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other, alpha))
}

-cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar <- function(out, self, scalar) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
+cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar <- function(out, self, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar`, out, self, scalars))
}

-cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar <- function(out, self, scalar) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
+cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar <- function(out, self, scalar) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
}

-cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other, alpha) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other, alpha))
+cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other))
}

-cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other, alpha) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other, alpha))
+cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar <- function(out, self, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar`, out, self, scalars))
}

-cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other))
+cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_Tensor <- function(out, self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_Tensor`, out, self, other))
}

-cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other))
+cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar <- function(out, self, scalar) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
}

-cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other))
+cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other))
}

-cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other))
+cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar <- function(out, self, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar`, out, self, scalars))
}

-cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other))
+cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_Tensor <- function(out, self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_Tensor`, out, self, other))
}

-cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other))
+cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar <- function(out, self, scalar) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
}

-cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar <- function(out, self, scalars) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar`, out, self, scalars))
+cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other))
}

-cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar <- function(out, self, scalars) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar`, out, self, scalars))
+cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar <- function(out, self, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar`, out, self, scalars))
}

-cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar <- function(out, self, scalars) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar`, out, self, scalars))
+cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar <- function(out, self, scalar) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
}

-cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar <- function(out, self, scalars) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar`, out, self, scalars))
+cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other))
}

cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar <- function(out, self, scalars) {
    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar`, out, self, scalars))
}

-cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar <- function(out, self, scalars) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar`, out, self, scalars))
+cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar <- function(out, self, scalar) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
+}
+
+cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other))
}

cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar <- function(out, self, scalars) {
    invisible(.Call(`_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar`, out, self, scalars))
}

+cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar <- function(out, self, scalar) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar`, out, self, scalar))
+}
+
+cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList <- function(out, self, other) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList`, out, self, other))
+}
+
cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar <- function(out, self, scalars) {
    invisible(.Call(`_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar`, out, self, scalars))
}

-cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList <- function(out, self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList`, out, self))
+cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList <- function(out, self, tensor1, tensor2, value) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList`, out, self, tensor1, tensor2, value))
}

-cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList <- function(out, self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList`, out, self))
+cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar <- function(out, self, tensor1, tensor2, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar`, out, self, tensor1, tensor2, scalars))
}

-cpp_torch_namespace__foreach_zero_self_TensorList <- function(self) {
-    .Call(`_torch_cpp_torch_namespace__foreach_zero_self_TensorList`, self)
+cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor <- function(out, self, tensor1, tensor2, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor`, out, self, tensor1, tensor2, scalars))
}

-cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList <- function(out, self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList`, out, self))
+cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList <- function(out, self, tensor1, tensor2, value) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList`, out, self, tensor1, tensor2, value))
+}
+
+cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar <- function(out, self, tensor1, tensor2, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar`, out, self, tensor1, tensor2, scalars))
+}
+
+cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor <- function(out, self, tensor1, tensor2, scalars) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor`, out, self, tensor1, tensor2, scalars))
}

cpp_torch_namespace__foreach_abs_out_out_TensorList_self_TensorList <- function(out, self) {
@@ -13873,6 +14425,10 @@ cpp_torch_namespace__foreach_erfc_out_out_TensorList_self_TensorList <- function
    invisible(.Call(`_torch_cpp_torch_namespace__foreach_erfc_out_out_TensorList_self_TensorList`, out, self))
}

+cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList <- function(out, self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList`, out, self))
+}
+
cpp_torch_namespace__foreach_expm1_out_out_TensorList_self_TensorList <- function(out, self) {
    invisible(.Call(`_torch_cpp_torch_namespace__foreach_expm1_out_out_TensorList_self_TensorList`, out, self))
}
@@ -13881,6 +14437,22 @@ cpp_torch_namespace__foreach_floor_out_out_TensorList_self_TensorList <- functio
    invisible(.Call(`_torch_cpp_torch_namespace__foreach_floor_out_out_TensorList_self_TensorList`, out, self))
}

+cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList <- function(out, self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList`, out, self))
+}
+
+cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList <- function(out, self, tensors1, weights) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList`, out, self, tensors1, weights))
+}
+
+cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar <- function(out, self, tensors1, weight) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar`, out, self, tensors1, weight))
+}
+
+cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList <- function(out, self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList`, out, self))
+}
+
cpp_torch_namespace__foreach_log_out_out_TensorList_self_TensorList <- function(out, self) {
    invisible(.Call(`_torch_cpp_torch_namespace__foreach_log_out_out_TensorList_self_TensorList`, out, self))
}
@@ -13897,94 +14469,86 @@ cpp_torch_namespace__foreach_log2_out_out_TensorList_self_TensorList <- function
    invisible(.Call(`_torch_cpp_torch_namespace__foreach_log2_out_out_TensorList_self_TensorList`, out, self))
}

-cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList <- function(out, self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList`, out, self))
-}
-
-cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList <- function(out, self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList`, out, self))
-}
-
-cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList <- function(out, self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList`, out, self))
+cpp_torch_namespace__foreach_max_out_out_TensorList_self_TensorList <- function(out, self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_max_out_out_TensorList_self_TensorList`, out, self))
}

-cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList <- function(out, self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList`, out, self))
+cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList <- function(out, self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList`, out, self))
}

-cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList <- function(out, self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList`, out, self))
+cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList <- function(out, self, ord, dtype) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList`, out, self, ord, dtype))
}

-cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList <- function(out, self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList`, out, self))
+cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_TensorList <- function(out, self, exponent) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_TensorList`, out, self, exponent))
}

-cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList <- function(out, self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList`, out, self))
+cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_Scalar <- function(out, self, exponent) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_Scalar`, out, self, exponent))
}

-cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList <- function(out, self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList`, out, self))
+cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_ArrayRefScalar <- function(out, self, exponent) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_ArrayRefScalar`, out, self, exponent))
}

cpp_torch_namespace__foreach_reciprocal_out_out_TensorList_self_TensorList <- function(out, self) {
    invisible(.Call(`_torch_cpp_torch_namespace__foreach_reciprocal_out_out_TensorList_self_TensorList`, out, self))
}

+cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList <- function(out, self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList`, out, self))
+}
+
cpp_torch_namespace__foreach_sigmoid_out_out_TensorList_self_TensorList <- function(out, self) {
    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sigmoid_out_out_TensorList_self_TensorList`, out, self))
}

-cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList <- function(out, self) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList`, out, self))
+cpp_torch_namespace__foreach_sign_out_out_TensorList_self_TensorList <- function(out, self) {
+    invisible(.Call(`_torch_cpp_torch_namespace__foreach_sign_out_out_TensorList_self_TensorList`, out, self))
}

-cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList <- function(out, self, tensor1, tensor2, value) {
-    invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList`, out, self, tensor1, tensor2, value))
+cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList <- function(out, self) { + invisible(.Call(`_torch_cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList`, out, self)) } -cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList <- function(out, self, tensor1, tensor2, value) { - invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList`, out, self, tensor1, tensor2, value)) +cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList <- function(out, self) { + invisible(.Call(`_torch_cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList`, out, self)) } -cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar <- function(out, self, tensor1, tensor2, scalars) { - invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar`, out, self, tensor1, tensor2, scalars)) +cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList <- function(out, self) { + invisible(.Call(`_torch_cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList`, out, self)) } -cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor <- function(out, self, tensor1, tensor2, scalars) { - invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor`, out, self, tensor1, tensor2, scalars)) +cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList <- function(out, self) { + invisible(.Call(`_torch_cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList`, out, self)) } -cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar <- function(out, self, tensor1, tensor2, scalars) { - invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar`, out, self, tensor1, tensor2, scalars)) +cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList <- function(out, self) { + invisible(.Call(`_torch_cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList`, out, self)) } -cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor <- function(out, self, tensor1, tensor2, scalars) { - invisible(.Call(`_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor`, out, self, tensor1, tensor2, scalars)) +cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList <- function(out, self) { + invisible(.Call(`_torch_cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList`, out, self)) } -cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList <- function(out, self, ord) { - invisible(.Call(`_torch_cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList`, out, self, ord)) +cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList <- function(out, self) { + invisible(.Call(`_torch_cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList`, out, self)) } 
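The reordered wrappers above back the `torch__foreach_*` namespace functions, which apply one fused kernel across an entire list of tensors instead of looping in R. A minimal sketch of the list-wise calling convention (assuming the generated counterparts `torch__foreach_add()` and `torch__foreach_zero_()` are exported as in previous releases):

library(torch)

params <- list(torch_randn(3), torch_randn(3))
grads  <- list(torch_ones(3),  torch_ones(3))

# One fused call computes params[[i]] + alpha * grads[[i]] for every i.
stepped <- torch__foreach_add(params, grads, alpha = -0.1)

# In-place variants keep the trailing underscore and return invisibly,
# mirroring the invisible(.Call(...)) pattern in the wrappers above.
torch__foreach_zero_(grads)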
-cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList <- function(out, self, tensors1, weights) {
-  invisible(.Call(`_torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList`, out, self, tensors1, weights))
+cpp_torch_namespace__foreach_zero_self_TensorList <- function(self) {
+  .Call(`_torch_cpp_torch_namespace__foreach_zero_self_TensorList`, self)
 }
 
-cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar <- function(out, self, tensors1, weight) {
-  invisible(.Call(`_torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar`, out, self, tensors1, weight))
+cpp_torch_namespace__foreach_copy_out_out_TensorList_self_TensorList_src_TensorList <- function(out, self, src, non_blocking) {
+  invisible(.Call(`_torch_cpp_torch_namespace__foreach_copy_out_out_TensorList_self_TensorList_src_TensorList`, out, self, src, non_blocking))
 }
 
 cpp_torch_namespace_bucketize_out_out_Tensor_self_Scalar_boundaries_Tensor <- function(out, self, boundaries, out_int32, right) {
   .Call(`_torch_cpp_torch_namespace_bucketize_out_out_Tensor_self_Scalar_boundaries_Tensor`, out, self, boundaries, out_int32, right)
 }
 
-cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar <- function(out, sorted_sequence, self, out_int32, right, side, sorter) {
-  .Call(`_torch_cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar`, out, sorted_sequence, self, out_int32, right, side, sorter)
-}
-
 cpp_torch_namespace_glu_jvp_out_out_Tensor_glu_Tensor_x_Tensor_dx_Tensor_dim_int64_t <- function(out, glu, x, dx, dim) {
   .Call(`_torch_cpp_torch_namespace_glu_jvp_out_out_Tensor_glu_Tensor_x_Tensor_dx_Tensor_dim_int64_t`, out, glu, x, dx, dim)
 }
@@ -14233,14 +14797,6 @@ cpp_torch_namespace__triton_multi_head_attention_out_out_Tensor_query_Tensor_key
   .Call(`_torch_cpp_torch_namespace__triton_multi_head_attention_out_out_Tensor_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor`, out, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask)
 }
 
-cpp_torch_namespace__transformer_decoder_only_layer_fwd_out_out0_Tensor_out1_Tensor_out2_Tensor_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor <- function(out0, out1, out2, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value) {
-  .Call(`_torch_cpp_torch_namespace__transformer_decoder_only_layer_fwd_out_out0_Tensor_out1_Tensor_out2_Tensor_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor`, out0, out1, out2, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value)
-}
-
-cpp_torch_namespace__native_decoder_only_multi_head_attention_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor <- function(out0, out1, out2, out3, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights) {
-  .Call(`_torch_cpp_torch_namespace__native_decoder_only_multi_head_attention_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor`, out0, out1, out2, out3, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights)
-}
-
 cpp_torch_namespace__foobar_out_out_Tensor_self_Tensor <- function(out, self, arg1, arg2, arg3) {
   .Call(`_torch_cpp_torch_namespace__foobar_out_out_Tensor_self_Tensor`, out, self, arg1, arg2, arg3)
 }
@@ -14253,6 +14809,14 @@ cpp_torch_namespace__fused_adam_self_TensorList_grads_TensorList_exp_avgs_Tensor
   .Call(`_torch_cpp_torch_namespace__fused_adam_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool`, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf)
 }
 
+cpp_torch_namespace__fused_adam_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool <- function(out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf) {
+  invisible(.Call(`_torch_cpp_torch_namespace__fused_adam_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool`, out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf))
+}
+
+cpp_torch_namespace__fused_adam_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool <- function(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf) {
+  .Call(`_torch_cpp_torch_namespace__fused_adam_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool`, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf)
+}
+
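The `lr_Tensor` overloads added here (and for `_fused_adamw`, `_fused_sgd`, and `_fused_adagrad` below) mirror upstream LibTorch, where the fused optimizer kernels can take the learning rate as a 0-d tensor so a scheduler may update it in place between steps. A hedged sketch of one fused Adam step, assuming a namespace-level `torch__fused_adam_()` wrapper is generated with the upstream argument order (all tensors below are made up for illustration):

library(torch)

p  <- list(torch_randn(4))   # parameters
g  <- list(torch_ones(4))    # their gradients
m1 <- list(torch_zeros(4))   # exp_avgs
m2 <- list(torch_zeros(4))   # exp_avg_sqs
mx <- list()                 # max_exp_avg_sqs, empty when amsgrad = FALSE
st <- list(torch_tensor(1))  # state_steps, one counter per parameter

torch__fused_adam_(p, g, m1, m2, mx, st,
                   lr = torch_tensor(1e-3),  # the new 0-d tensor overload
                   beta1 = 0.9, beta2 = 0.999,
                   weight_decay = 0, eps = 1e-8,
                   amsgrad = FALSE, maximize = FALSE)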
 cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool <- function(out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf) {
   invisible(.Call(`_torch_cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool`, out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf))
 }
@@ -14261,6 +14825,38 @@ cpp_torch_namespace__fused_adamw_self_TensorList_grads_TensorList_exp_avgs_Tenso
   .Call(`_torch_cpp_torch_namespace__fused_adamw_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool`, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf)
 }
 
+cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool <- function(out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf) {
+  invisible(.Call(`_torch_cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool`, out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf))
+}
+
+cpp_torch_namespace__fused_adamw_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool <- function(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf) {
+  .Call(`_torch_cpp_torch_namespace__fused_adamw_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool`, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf)
+}
+
+cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool <- function(out, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf) {
+  invisible(.Call(`_torch_cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool`, out, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf))
+}
+
+cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool <- function(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf) {
+  .Call(`_torch_cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool`, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf)
+}
+
+cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool <- function(out, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf) {
+  invisible(.Call(`_torch_cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool`, out, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf))
+}
+
+cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool <- function(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf) {
+  .Call(`_torch_cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool`, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf)
+}
+
+cpp_torch_namespace__fused_adagrad_out_out_TensorList_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool <- function(out, self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf) {
+  invisible(.Call(`_torch_cpp_torch_namespace__fused_adagrad_out_out_TensorList_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool`, out, self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf))
+}
+
+cpp_torch_namespace__fused_adagrad_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool <- function(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf) {
+  .Call(`_torch_cpp_torch_namespace__fused_adagrad_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool`, self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf)
+}
+
 cpp_torch_generator <- function() {
   .Call(`_torch_cpp_torch_generator`)
 }
diff --git a/R/gen-method.R b/R/gen-method.R
index 6ca8fd6a9d..e88be8b8d9 100644
--- a/R/gen-method.R
+++ b/R/gen-method.R
@@ -316,26 +316,26 @@ call_c_function(
 return_types = return_types,
 fun_type = 'method'
 )})
-Tensor$set("public", "_neg_view", function() { args <- list()
+Tensor$set("public", "_lazy_clone", function() { args <- list()
 args <- c(list(self = self), args)
 expected_types <- list(self = "Tensor")
 nd_args <- "self"
 return_types <- list(list('Tensor'))
 call_c_function(
-  fun_name = '_neg_view',
+  fun_name = '_lazy_clone',
   args = args,
   expected_types = expected_types,
   nd_args = nd_args,
   return_types = return_types,
   fun_type = 'method'
 )})
-Tensor$set("public", "_nested_tensor_offsets", function() { args <- list()
+Tensor$set("public", "_neg_view", function() { args <- list()
 args <- c(list(self = self), args)
 expected_types <- list(self = "Tensor")
 nd_args <- "self"
-return_types <- list(list('IntArrayRef'))
+return_types <- list(list('Tensor'))
 call_c_function(
-  fun_name = '_nested_tensor_offsets',
+  fun_name = '_neg_view',
   args = args,
   expected_types = expected_types,
   nd_args = nd_args,
@@ -355,6 +355,19 @@ call_c_function(
 return_types = return_types,
 fun_type = 'method'
 )})
+Tensor$set("public", "_nested_tensor_storage_offsets", function() { args <- list()
+args <- c(list(self = self), args)
+expected_types <- list(self = "Tensor")
+nd_args <- "self"
+return_types <- list(list('Tensor'))
+call_c_function(
+  fun_name = '_nested_tensor_storage_offsets',
+  args = args,
+  expected_types = expected_types,
+  nd_args = nd_args,
+  return_types = return_types,
+  fun_type = 'method'
+)})
 Tensor$set("public", "_nested_tensor_strides", function() { args <- list()
 args <- c(list(self = self), args)
 expected_types <- list(self = "Tensor")
@@ -394,9 +407,22 @@ call_c_function(
 return_types = return_types,
 fun_type = 'method'
 )})
-Tensor$set("public", "_to_dense", function(dtype = NULL) { args <- mget(x = c("dtype"))
+Tensor$set("public", "_sparse_mask_projection", function(mask, accumulate_matches = FALSE) { args <- mget(x = c("mask", "accumulate_matches"))
 args <- c(list(self = self), args)
-expected_types <- list(self = "Tensor", dtype = "ScalarType")
+expected_types <- list(self = "Tensor", mask = "Tensor", accumulate_matches = "bool")
+nd_args <- c("self", "mask")
+return_types <- list(list('Tensor'))
+call_c_function(
+  fun_name = '_sparse_mask_projection',
+  args = args,
+  expected_types = expected_types,
+  nd_args = nd_args,
+  return_types = return_types,
+  fun_type = 'method'
+)})
+Tensor$set("public", "_to_dense", function(dtype = NULL, masked_grad = NULL) { args <- mget(x = c("dtype", "masked_grad"))
+args <- c(list(self = self), args)
+expected_types <- list(self = "Tensor", dtype = "ScalarType", masked_grad = "bool")
 nd_args <- "self"
 return_types <- list(list('Tensor'))
 call_c_function(
@@ -407,6 +433,72 @@ call_c_function(
 return_types = return_types,
 fun_type = 'method'
 )})
+Tensor$set("public", "_to_sparse", function(layout = NULL, sparse_dim, blocksize = NULL, dense_dim = NULL) { args <- mget(x = c("layout", "sparse_dim", "blocksize", "dense_dim"))
+args <- c(list(self = self), args)
+expected_types <- list(self = "Tensor", layout = "Layout", sparse_dim = "int64_t",
+    blocksize = "IntArrayRef", dense_dim = "int64_t")
+nd_args <- c("self", "sparse_dim")
+return_types <- list(list('Tensor'))
+call_c_function(
+  fun_name = '_to_sparse',
+  args = args,
+  expected_types = expected_types,
+  nd_args = nd_args,
+  return_types = return_types,
+  fun_type = 'method'
+)})
+Tensor$set("public", "_to_sparse_bsc", function(blocksize, dense_dim = NULL) { args <- mget(x = c("blocksize", "dense_dim"))
+args <- c(list(self = self), args)
+expected_types <- list(self = "Tensor", blocksize = "IntArrayRef", dense_dim = "int64_t")
+nd_args <- c("self", "blocksize")
+return_types <- list(list('Tensor'))
+call_c_function(
+  fun_name = '_to_sparse_bsc',
+  args = args,
+  expected_types = expected_types,
+  nd_args = nd_args,
+  return_types = return_types,
+  fun_type = 'method'
+)})
+Tensor$set("public", "_to_sparse_bsr", function(blocksize, dense_dim = NULL) { args <- mget(x = c("blocksize", "dense_dim"))
+args <- c(list(self = self), args)
+expected_types <- list(self = "Tensor", blocksize = "IntArrayRef", dense_dim = "int64_t")
+nd_args <- c("self", "blocksize")
+return_types <- list(list('Tensor'))
+call_c_function(
+  fun_name = '_to_sparse_bsr',
+  args = args,
+  expected_types = expected_types,
+  nd_args = nd_args,
+  return_types = return_types,
+  fun_type = 'method'
+)})
+Tensor$set("public", "_to_sparse_csc", function(dense_dim = NULL) { args <- mget(x = c("dense_dim"))
+args <- c(list(self = self), args)
+expected_types <- list(self = "Tensor", dense_dim = "int64_t")
+nd_args <- "self"
+return_types <- list(list('Tensor'))
+call_c_function(
+  fun_name = '_to_sparse_csc',
+  args = args,
+  expected_types = expected_types,
+  nd_args = nd_args,
+  return_types = return_types,
+  fun_type = 'method'
+)})
+Tensor$set("public", "_to_sparse_csr", function(dense_dim = NULL) { args <- mget(x = c("dense_dim"))
+args <- c(list(self = self), args)
+expected_types <- list(self = "Tensor", dense_dim = "int64_t")
+nd_args <- "self"
+return_types <- list(list('Tensor'))
+call_c_function(
+  fun_name = '_to_sparse_csr',
+  args = args,
+  expected_types = expected_types,
+  nd_args = nd_args,
+  return_types = return_types,
+  fun_type = 'method'
+)})
 Tensor$set("public", "_values", function() { args <- list()
 args <- c(list(self = self), args)
 expected_types <- list(self = "Tensor")
@@ -784,9 +876,10 @@ call_c_function(
 return_types = return_types,
 fun_type = 'method'
 )})
-Tensor$set("public", "all", function(dim, keepdim = FALSE) { args <- mget(x = c("dim", "keepdim"))
+Tensor$set("public", "all", function(dim = NULL, keepdim = FALSE) { args <- mget(x = c("dim", "keepdim"))
 args <- c(list(self = self), args)
-expected_types <- list(self = "Tensor", dim = c("int64_t", "Dimname"), keepdim = "bool")
+expected_types <- list(self = "Tensor", dim = c("int64_t", "IntArrayRef", "Dimname"
+), keepdim = "bool")
 nd_args <- c("self", "dim")
 return_types <- list(list('Tensor'))
 call_c_function(
@@ -863,9 +956,10 @@ call_c_function(
 return_types = return_types,
 fun_type = 'method'
 )})
-Tensor$set("public", "any", function(dim, keepdim = FALSE) { args <- mget(x = c("dim", "keepdim"))
+Tensor$set("public", "any", function(dim = NULL, keepdim = FALSE) { args <- mget(x = c("dim", "keepdim"))
 args <- c(list(self = self), args)
-expected_types <- list(self = "Tensor", dim = c("int64_t", "Dimname"), keepdim = "bool")
+expected_types <- list(self = "Tensor", dim = c("int64_t", "IntArrayRef", "Dimname"
+), keepdim = "bool")
 nd_args <- c("self", "dim")
 return_types <- list(list('Tensor'))
 call_c_function(
@@ -3274,7 +3368,7 @@ call_c_function(
 )})
 Tensor$set("public", "index", function(indices) { args <- mget(x = c("indices"))
 args <- c(list(self = self), args)
-expected_types <- list(self = "Tensor", indices = "const c10::List<c10::optional<Tensor>> &")
+expected_types <- list(self = "Tensor", indices = "const c10::List<::std::optional<Tensor>> &")
 nd_args <- c("self", "indices")
 return_types <- list(list('Tensor'))
 call_c_function(
@@ -3371,7 +3465,7 @@ call_c_function(
 )})
 Tensor$set("public", "index_put", function(indices, values, accumulate = FALSE) { args <- mget(x = c("indices", "values", "accumulate"))
 args <- c(list(self = self), args)
-expected_types <- list(self = "Tensor", indices = "const c10::List<c10::optional<Tensor>> &",
+expected_types <- list(self = "Tensor", indices = "const c10::List<::std::optional<Tensor>> &",
     values = "Tensor", accumulate = "bool")
 nd_args <- c("self", "indices", "values")
 return_types <- list(list('Tensor'))
@@ -3385,7 +3479,7 @@ call_c_function(
 )})
 Tensor$set("public", "index_put_", function(indices, values, accumulate = FALSE) { args <- mget(x = c("indices", "values", "accumulate"))
 args <- c(list(self = self), args)
-expected_types <- list(self = "Tensor", indices = "const c10::List<c10::optional<Tensor>> &",
+expected_types <- list(self = "Tensor", indices = "const c10::List<::std::optional<Tensor>> &",
     values = "Tensor", accumulate = "bool")
 nd_args <- c("self", "indices", "values")
 return_types <- list(list('Tensor'))
@@ -5059,6 +5153,19 @@ call_c_function(
 return_types = return_types,
 fun_type = 'method'
 )})
+Tensor$set("public", "nonzero_static", function(size, fill_value = -1L) { args <- mget(x = c("size", "fill_value"))
+args <- c(list(self = self), args)
+expected_types <- list(self = "Tensor", size = "int64_t", fill_value = "int64_t")
+nd_args <- c("self", "size")
+return_types <- list(list('Tensor'))
+call_c_function(
+  fun_name = 'nonzero_static',
+  args = args,
+  expected_types = expected_types,
+  nd_args = nd_args,
+  return_types = return_types,
+  fun_type = 'method'
+)})
 Tensor$set("private", "_norm", function(p = 2L, dim, keepdim = FALSE, dtype) { args <- mget(x = c("p", "dim", "keepdim", "dtype"))
 args <- c(list(self = self), args)
 expected_types <- list(self = "Tensor", p = "Scalar", dim = c("IntArrayRef", "DimnameList"
= c("dtype", "masked_grad")) args <- c(list(self = self), args) -expected_types <- list(self = "Tensor", dtype = "ScalarType") +expected_types <- list(self = "Tensor", dtype = "ScalarType", masked_grad = "bool") nd_args <- "self" return_types <- list(list('Tensor')) call_c_function( diff --git a/R/gen-namespace-docs.R b/R/gen-namespace-docs.R index 9dcdd84f67..ad6550e6ce 100644 --- a/R/gen-namespace-docs.R +++ b/R/gen-namespace-docs.R @@ -4450,7 +4450,7 @@ NULL #' described in the type promotion documentation . #' #' -#' @param from (dtype) The original `torch_dtype`. +#' @param from_ (dtype) The original `torch_dtype`. #' @param to (dtype) The target `torch_dtype`. #' #' @name torch_can_cast diff --git a/R/gen-namespace.R b/R/gen-namespace.R index 9a80fd6f11..22c521607b 100644 --- a/R/gen-namespace.R +++ b/R/gen-namespace.R @@ -510,10 +510,10 @@ fun_type = 'namespace' #' @rdname torch__assert_async -torch__assert_async <- function(self) { - args <- mget(x = c("self")) -expected_types <- list(self = "Tensor") -nd_args <- "self" +torch__assert_async <- function(self, assert_msg) { + args <- mget(x = c("self", "assert_msg")) +expected_types <- list(self = "Tensor", assert_msg = "c10::string_view") +nd_args <- c("self", "assert_msg") return_types <- list(list("void")) call_c_function( fun_name = '_assert_async', @@ -526,6 +526,23 @@ fun_type = 'namespace' } +#' @rdname torch__assert_scalar +torch__assert_scalar <- function(self, assert_msg) { + args <- mget(x = c("self", "assert_msg")) +expected_types <- list(self = "Scalar", assert_msg = "c10::string_view") +nd_args <- c("self", "assert_msg") +return_types <- list(list("void")) +call_c_function( +fun_name = '_assert_scalar', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__assert_tensor_metadata torch__assert_tensor_metadata <- function(a, size = NULL, stride = NULL, dtype = NULL) { args <- mget(x = c("a", "size", "stride", "dtype")) @@ -586,6 +603,104 @@ fun_type = 'namespace' } +#' @rdname torch__batch_norm_no_update +torch__batch_norm_no_update <- function(input, weight, bias, running_mean, running_var, momentum, eps) { + args <- mget(x = c("input", "weight", "bias", "running_mean", "running_var", "momentum", "eps")) +expected_types <- list(input = "Tensor", weight = "Tensor", bias = "Tensor", running_mean = "Tensor", + running_var = "Tensor", momentum = "double", eps = "double") +nd_args <- c("input", "weight", "bias", "running_mean", "running_var", "momentum", +"eps") +return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor")) +call_c_function( +fun_name = '_batch_norm_no_update', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__batch_norm_no_update_out +torch__batch_norm_no_update_out <- function(out0, out1, out2, out3, input, weight, bias, running_mean, running_var, momentum, eps) { + args <- mget(x = c("out0", "out1", "out2", "out3", "input", "weight", "bias", "running_mean", "running_var", "momentum", "eps")) +expected_types <- list(out0 = "Tensor", out1 = "Tensor", out2 = "Tensor", out3 = "Tensor", + input = "Tensor", weight = "Tensor", bias = "Tensor", running_mean = "Tensor", + running_var = "Tensor", momentum = "double", eps = "double") +nd_args <- c("out0", "out1", "out2", "out3", "input", "weight", "bias", +"running_mean", "running_var", "momentum", "eps") +return_types <- list(list("Tensor", "Tensor", "Tensor", 
"Tensor")) +call_c_function( +fun_name = '_batch_norm_no_update_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__batch_norm_with_update +torch__batch_norm_with_update <- function(input, weight, bias, running_mean, running_var, momentum, eps) { + args <- mget(x = c("input", "weight", "bias", "running_mean", "running_var", "momentum", "eps")) +expected_types <- list(input = "Tensor", weight = "Tensor", bias = "Tensor", running_mean = "Tensor", + running_var = "Tensor", momentum = "double", eps = "double") +nd_args <- c("input", "weight", "bias", "running_mean", "running_var", "momentum", +"eps") +return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor")) +call_c_function( +fun_name = '_batch_norm_with_update', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__batch_norm_with_update_functional +torch__batch_norm_with_update_functional <- function(input, weight, bias, running_mean, running_var, momentum, eps) { + args <- mget(x = c("input", "weight", "bias", "running_mean", "running_var", "momentum", "eps")) +expected_types <- list(input = "Tensor", weight = "Tensor", bias = "Tensor", running_mean = "Tensor", + running_var = "Tensor", momentum = "double", eps = "double") +nd_args <- c("input", "weight", "bias", "running_mean", "running_var", "momentum", +"eps") +return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")) +call_c_function( +fun_name = '_batch_norm_with_update_functional', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__batch_norm_with_update_out +torch__batch_norm_with_update_out <- function(out, save_mean, save_invstd, reserve, input, weight, bias, running_mean, running_var, momentum, eps) { + args <- mget(x = c("out", "save_mean", "save_invstd", "reserve", "input", "weight", "bias", "running_mean", "running_var", "momentum", "eps")) +expected_types <- list(out = "Tensor", save_mean = "Tensor", save_invstd = "Tensor", + reserve = "Tensor", input = "Tensor", weight = "Tensor", + bias = "Tensor", running_mean = "Tensor", running_var = "Tensor", + momentum = "double", eps = "double") +nd_args <- c("out", "save_mean", "save_invstd", "reserve", "input", "weight", +"bias", "running_mean", "running_var", "momentum", "eps") +return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor")) +call_c_function( +fun_name = '_batch_norm_with_update_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__cast_Byte torch__cast_Byte <- function(self, non_blocking = FALSE) { args <- mget(x = c("self", "non_blocking")) @@ -844,14 +959,32 @@ fun_type = 'namespace' } -#' @rdname torch__chunk_grad_outputs_efficient_attention -torch__chunk_grad_outputs_efficient_attention <- function(query, key, value, is_causal = FALSE) { - args <- mget(x = c("query", "key", "value", "is_causal")) -expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", is_causal = "bool") -nd_args <- c("query", "key", "value") -return_types <- list(list('bool')) +#' @rdname torch__chunk_cat +torch__chunk_cat <- function(tensors, dim, num_chunks) { + args <- mget(x = c("tensors", "dim", "num_chunks")) +expected_types <- list(tensors = "TensorList", dim = "int64_t", 
num_chunks = "int64_t") +nd_args <- c("tensors", "dim", "num_chunks") +return_types <- list(list('Tensor')) call_c_function( -fun_name = '_chunk_grad_outputs_efficient_attention', +fun_name = '_chunk_cat', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__chunk_cat_out +torch__chunk_cat_out <- function(out, tensors, dim, num_chunks) { + args <- mget(x = c("out", "tensors", "dim", "num_chunks")) +expected_types <- list(out = "Tensor", tensors = "TensorList", dim = "int64_t", + num_chunks = "int64_t") +nd_args <- c("out", "tensors", "dim", "num_chunks") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_chunk_cat_out', args = args, expected_types = expected_types, nd_args = nd_args, @@ -1158,6 +1291,23 @@ fun_type = 'namespace' } +#' @rdname torch__convert_weight_to_int4pack +torch__convert_weight_to_int4pack <- function(self, innerKTiles) { + args <- mget(x = c("self", "innerKTiles")) +expected_types <- list(self = "Tensor", innerKTiles = "int64_t") +nd_args <- c("self", "innerKTiles") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_convert_weight_to_int4pack', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__convolution torch__convolution <- function(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32) { args <- mget(x = c("input", "weight", "bias", "stride", "padding", "dilation", "transposed", "output_padding", "groups", "benchmark", "deterministic", "cudnn_enabled", "allow_tf32")) @@ -1312,6 +1462,60 @@ fun_type = 'namespace' } +#' @rdname torch__cslt_compress +torch__cslt_compress <- function(input) { + args <- mget(x = c("input")) +expected_types <- list(input = "Tensor") +nd_args <- "input" +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_cslt_compress', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__cslt_sparse_mm +torch__cslt_sparse_mm <- function(compressed_A, dense_B, bias = list(), alpha = list(), out_dtype = NULL, transpose_result = FALSE, alg_id = 0L) { + args <- mget(x = c("compressed_A", "dense_B", "bias", "alpha", "out_dtype", "transpose_result", "alg_id")) +expected_types <- list(compressed_A = "Tensor", dense_B = "Tensor", bias = "Tensor", + alpha = "Tensor", out_dtype = "ScalarType", transpose_result = "bool", + alg_id = "int64_t") +nd_args <- c("compressed_A", "dense_B") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_cslt_sparse_mm', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__cslt_sparse_mm_search +torch__cslt_sparse_mm_search <- function(compressed_A, dense_B, bias = list(), alpha = list(), out_dtype = NULL, transpose_result = FALSE) { + args <- mget(x = c("compressed_A", "dense_B", "bias", "alpha", "out_dtype", "transpose_result")) +expected_types <- list(compressed_A = "Tensor", dense_B = "Tensor", bias = "Tensor", + alpha = "Tensor", out_dtype = "ScalarType", transpose_result = "bool") +nd_args <- c("compressed_A", "dense_B") +return_types <- list(list('int64_t')) +call_c_function( +fun_name = '_cslt_sparse_mm_search', +args = args, +expected_types = expected_types, +nd_args = nd_args, 
+return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__ctc_loss torch__ctc_loss <- function(log_probs, targets, input_lengths, target_lengths, blank = 0L, zero_infinity = FALSE) { args <- mget(x = c("log_probs", "targets", "input_lengths", "target_lengths", "blank", "zero_infinity")) @@ -1616,74 +1820,6 @@ fun_type = 'namespace' } -#' @rdname torch__cufft_clear_plan_cache -torch__cufft_clear_plan_cache <- function(device_index) { - args <- mget(x = c("device_index")) -expected_types <- list(device_index = "int64_t") -nd_args <- "device_index" -return_types <- list(list("void")) -call_c_function( -fun_name = '_cufft_clear_plan_cache', -args = args, -expected_types = expected_types, -nd_args = nd_args, -return_types = return_types, -fun_type = 'namespace' -) -} - - -#' @rdname torch__cufft_get_plan_cache_max_size -torch__cufft_get_plan_cache_max_size <- function(device_index) { - args <- mget(x = c("device_index")) -expected_types <- list(device_index = "int64_t") -nd_args <- "device_index" -return_types <- list(list('int64_t')) -call_c_function( -fun_name = '_cufft_get_plan_cache_max_size', -args = args, -expected_types = expected_types, -nd_args = nd_args, -return_types = return_types, -fun_type = 'namespace' -) -} - - -#' @rdname torch__cufft_get_plan_cache_size -torch__cufft_get_plan_cache_size <- function(device_index) { - args <- mget(x = c("device_index")) -expected_types <- list(device_index = "int64_t") -nd_args <- "device_index" -return_types <- list(list('int64_t')) -call_c_function( -fun_name = '_cufft_get_plan_cache_size', -args = args, -expected_types = expected_types, -nd_args = nd_args, -return_types = return_types, -fun_type = 'namespace' -) -} - - -#' @rdname torch__cufft_set_plan_cache_max_size -torch__cufft_set_plan_cache_max_size <- function(device_index, max_size) { - args <- mget(x = c("device_index", "max_size")) -expected_types <- list(device_index = "int64_t", max_size = "int64_t") -nd_args <- c("device_index", "max_size") -return_types <- list(list("void")) -call_c_function( -fun_name = '_cufft_set_plan_cache_max_size', -args = args, -expected_types = expected_types, -nd_args = nd_args, -return_types = return_types, -fun_type = 'namespace' -) -} - - #' @rdname torch__cummax_helper torch__cummax_helper <- function(self, values, indices, dim) { args <- mget(x = c("self", "values", "indices", "dim")) @@ -1789,13 +1925,20 @@ fun_type = 'namespace' #' @rdname torch__efficient_attention_backward -torch__efficient_attention_backward <- function(grad_out_, query, key, value, out, logsumexp, is_causal = FALSE, chunk_grad_outputs = FALSE) { - args <- mget(x = c("grad_out_", "query", "key", "value", "out", "logsumexp", "is_causal", "chunk_grad_outputs")) +torch__efficient_attention_backward <- function(grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale = NULL, num_splits_key = NULL, window_size = NULL, shared_storage_dqdkdv = FALSE) { + args <- mget(x = c("grad_out_", "query", "key", "value", "bias", "out", "cu_seqlens_q", "cu_seqlens_k", "max_seqlen_q", "max_seqlen_k", "logsumexp", "dropout_p", "philox_seed", "philox_offset", "custom_mask_type", "bias_requires_grad", "scale", "num_splits_key", "window_size", "shared_storage_dqdkdv")) expected_types <- list(grad_out_ = "Tensor", query = "Tensor", key = "Tensor", - value = "Tensor", out = "Tensor", logsumexp = "Tensor", is_causal = "bool", - chunk_grad_outputs = 
"bool") -nd_args <- c("grad_out_", "query", "key", "value", "out", "logsumexp") -return_types <- list(list("Tensor", "Tensor", "Tensor")) + value = "Tensor", bias = "Tensor", out = "Tensor", cu_seqlens_q = "Tensor", + cu_seqlens_k = "Tensor", max_seqlen_q = "int64_t", max_seqlen_k = "int64_t", + logsumexp = "Tensor", dropout_p = "double", philox_seed = "Tensor", + philox_offset = "Tensor", custom_mask_type = "int64_t", bias_requires_grad = "bool", + scale = "double", num_splits_key = "int64_t", window_size = "int64_t", + shared_storage_dqdkdv = "bool") +nd_args <- c("grad_out_", "query", "key", "value", "bias", "out", "cu_seqlens_q", +"cu_seqlens_k", "max_seqlen_q", "max_seqlen_k", "logsumexp", +"dropout_p", "philox_seed", "philox_offset", "custom_mask_type", +"bias_requires_grad") +return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor")) call_c_function( fun_name = '_efficient_attention_backward', args = args, @@ -1808,14 +1951,17 @@ fun_type = 'namespace' #' @rdname torch__efficient_attention_forward -torch__efficient_attention_forward <- function(query, key, value, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, compute_log_sumexp = FALSE, causal = FALSE) { - args <- mget(x = c("query", "key", "value", "cu_seqlens_q", "cu_seqlens_k", "max_seqlen_q", "compute_log_sumexp", "causal")) -expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", cu_seqlens_q = "Tensor", - cu_seqlens_k = "Tensor", max_seqlen_q = "int64_t", compute_log_sumexp = "bool", - causal = "bool") -nd_args <- c("query", "key", "value", "cu_seqlens_q", "cu_seqlens_k", "max_seqlen_q" -) -return_types <- list(list("Tensor", "Tensor")) +torch__efficient_attention_forward <- function(query, key, value, bias, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, custom_mask_type, compute_log_sumexp = FALSE, scale = NULL, seqlen_k = list(), window_size = NULL) { + args <- mget(x = c("query", "key", "value", "bias", "cu_seqlens_q", "cu_seqlens_k", "max_seqlen_q", "max_seqlen_k", "dropout_p", "custom_mask_type", "compute_log_sumexp", "scale", "seqlen_k", "window_size")) +expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", bias = "Tensor", + cu_seqlens_q = "Tensor", cu_seqlens_k = "Tensor", max_seqlen_q = "int64_t", + max_seqlen_k = "int64_t", dropout_p = "double", custom_mask_type = "int64_t", + compute_log_sumexp = "bool", scale = "double", seqlen_k = "Tensor", + window_size = "int64_t") +nd_args <- c("query", "key", "value", "bias", "cu_seqlens_q", "cu_seqlens_k", +"max_seqlen_q", "max_seqlen_k", "dropout_p", "custom_mask_type" +) +return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor", "int64_t", "int64_t")) call_c_function( fun_name = '_efficient_attention_forward', args = args, @@ -2441,14 +2587,33 @@ fun_type = 'namespace' } +#' @rdname torch__fill_mem_eff_dropout_mask_ +torch__fill_mem_eff_dropout_mask_ <- function(self, dropout_p, seed, offset) { + args <- mget(x = c("self", "dropout_p", "seed", "offset")) +expected_types <- list(self = "Tensor", dropout_p = "double", seed = "int64_t", + offset = "int64_t") +nd_args <- c("self", "dropout_p", "seed", "offset") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_fill_mem_eff_dropout_mask_', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__flash_attention_backward -torch__flash_attention_backward <- function(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, 
dropout_p, is_causal, philox_seed, philox_offset) { - args <- mget(x = c("grad_out", "query", "key", "value", "out", "logsumexp", "cum_seq_q", "cum_seq_k", "max_q", "max_k", "dropout_p", "is_causal", "philox_seed", "philox_offset")) +torch__flash_attention_backward <- function(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale = NULL, window_size_left = NULL, window_size_right = NULL) { + args <- mget(x = c("grad_out", "query", "key", "value", "out", "logsumexp", "cum_seq_q", "cum_seq_k", "max_q", "max_k", "dropout_p", "is_causal", "philox_seed", "philox_offset", "scale", "window_size_left", "window_size_right")) expected_types <- list(grad_out = "Tensor", query = "Tensor", key = "Tensor", value = "Tensor", out = "Tensor", logsumexp = "Tensor", cum_seq_q = "Tensor", cum_seq_k = "Tensor", max_q = "int64_t", max_k = "int64_t", - dropout_p = "double", is_causal = "bool", philox_seed = "int64_t", - philox_offset = "int64_t") + dropout_p = "double", is_causal = "bool", philox_seed = "Tensor", + philox_offset = "Tensor", scale = "double", window_size_left = "int64_t", + window_size_right = "int64_t") nd_args <- c("grad_out", "query", "key", "value", "out", "logsumexp", "cum_seq_q", "cum_seq_k", "max_q", "max_k", "dropout_p", "is_causal", "philox_seed", "philox_offset") @@ -2465,14 +2630,16 @@ fun_type = 'namespace' #' @rdname torch__flash_attention_forward -torch__flash_attention_forward <- function(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask) { - args <- mget(x = c("query", "key", "value", "cum_seq_q", "cum_seq_k", "max_q", "max_k", "dropout_p", "is_causal", "return_debug_mask")) +torch__flash_attention_forward <- function(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale = NULL, window_size_left = NULL, window_size_right = NULL, seqused_k = list(), alibi_slopes = list()) { + args <- mget(x = c("query", "key", "value", "cum_seq_q", "cum_seq_k", "max_q", "max_k", "dropout_p", "is_causal", "return_debug_mask", "scale", "window_size_left", "window_size_right", "seqused_k", "alibi_slopes")) expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", cum_seq_q = "Tensor", cum_seq_k = "Tensor", max_q = "int64_t", max_k = "int64_t", - dropout_p = "double", is_causal = "bool", return_debug_mask = "bool") + dropout_p = "double", is_causal = "bool", return_debug_mask = "bool", + scale = "double", window_size_left = "int64_t", window_size_right = "int64_t", + seqused_k = "Tensor", alibi_slopes = "Tensor") nd_args <- c("query", "key", "value", "cum_seq_q", "cum_seq_k", "max_q", "max_k", "dropout_p", "is_causal", "return_debug_mask") -return_types <- list(list("Tensor", "Tensor", "int64_t", "int64_t", "Tensor")) +return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor", "Tensor")) call_c_function( fun_name = '_flash_attention_forward', args = args, @@ -2624,8 +2791,8 @@ fun_type = 'namespace' #' @rdname torch__foreach_add torch__foreach_add <- function(self, other, scalar, scalars, alpha = 1L) { args <- mget(x = c("self", "other", "scalar", "scalars", "alpha")) -expected_types <- list(self = "TensorList", other = "TensorList", scalar = "Scalar", - scalars = "ArrayRef", alpha = "Scalar") +expected_types <- list(self = "TensorList", other = c("TensorList", "Tensor"), + scalar = "Scalar", scalars = "ArrayRef", alpha = "Scalar") nd_args <- c("self", "other", "scalar", "scalars") return_types <- 
list(list('TensorList')) call_c_function( @@ -2642,8 +2809,8 @@ fun_type = 'namespace' #' @rdname torch__foreach_add_ torch__foreach_add_ <- function(self, other, scalar, scalars, alpha = 1L) { args <- mget(x = c("self", "other", "scalar", "scalars", "alpha")) -expected_types <- list(self = "TensorList", other = "TensorList", scalar = "Scalar", - scalars = "ArrayRef", alpha = "Scalar") +expected_types <- list(self = "TensorList", other = c("TensorList", "Tensor"), + scalar = "Scalar", scalars = "ArrayRef", alpha = "Scalar") nd_args <- c("self", "other", "scalar", "scalars") return_types <- list(list("void")) call_c_function( @@ -2660,8 +2827,8 @@ fun_type = 'namespace' #' @rdname torch__foreach_add_out torch__foreach_add_out <- function(out, self, other, scalar, scalars, alpha = 1L) { args <- mget(x = c("out", "self", "other", "scalar", "scalars", "alpha")) -expected_types <- list(out = "TensorList", self = "TensorList", other = "TensorList", - scalar = "Scalar", scalars = "ArrayRef", alpha = "Scalar") +expected_types <- list(out = "TensorList", self = "TensorList", other = c("TensorList", +"Tensor"), scalar = "Scalar", scalars = "ArrayRef", alpha = "Scalar") nd_args <- c("out", "self", "other", "scalar", "scalars") return_types <- list(list("void")) call_c_function( @@ -3046,6 +3213,58 @@ fun_type = 'namespace' } +#' @rdname torch__foreach_copy +torch__foreach_copy <- function(self, src, non_blocking = FALSE) { + args <- mget(x = c("self", "src", "non_blocking")) +expected_types <- list(self = "TensorList", src = "TensorList", non_blocking = "bool") +nd_args <- c("self", "src") +return_types <- list(list('TensorList')) +call_c_function( +fun_name = '_foreach_copy', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__foreach_copy_ +torch__foreach_copy_ <- function(self, src, non_blocking = FALSE) { + args <- mget(x = c("self", "src", "non_blocking")) +expected_types <- list(self = "TensorList", src = "TensorList", non_blocking = "bool") +nd_args <- c("self", "src") +return_types <- list(list("void")) +call_c_function( +fun_name = '_foreach_copy_', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__foreach_copy_out +torch__foreach_copy_out <- function(out, self, src, non_blocking = FALSE) { + args <- mget(x = c("out", "self", "src", "non_blocking")) +expected_types <- list(out = "TensorList", self = "TensorList", src = "TensorList", + non_blocking = "bool") +nd_args <- c("out", "self", "src") +return_types <- list(list("void")) +call_c_function( +fun_name = '_foreach_copy_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__foreach_cos torch__foreach_cos <- function(self) { args <- mget(x = c("self")) @@ -3151,8 +3370,8 @@ fun_type = 'namespace' #' @rdname torch__foreach_div torch__foreach_div <- function(self, other, scalar, scalars) { args <- mget(x = c("self", "other", "scalar", "scalars")) -expected_types <- list(self = "TensorList", other = "TensorList", scalar = "Scalar", - scalars = "ArrayRef") +expected_types <- list(self = "TensorList", other = c("TensorList", "Tensor"), + scalar = "Scalar", scalars = "ArrayRef") nd_args <- c("self", "other", "scalar", "scalars") return_types <- list(list('TensorList')) call_c_function( @@ -3169,8 +3388,8 @@ fun_type = 'namespace' #' @rdname 
torch__foreach_div_ torch__foreach_div_ <- function(self, other, scalar, scalars) { args <- mget(x = c("self", "other", "scalar", "scalars")) -expected_types <- list(self = "TensorList", other = "TensorList", scalar = "Scalar", - scalars = "ArrayRef") +expected_types <- list(self = "TensorList", other = c("TensorList", "Tensor"), + scalar = "Scalar", scalars = "ArrayRef") nd_args <- c("self", "other", "scalar", "scalars") return_types <- list(list("void")) call_c_function( @@ -3187,8 +3406,8 @@ fun_type = 'namespace' #' @rdname torch__foreach_div_out torch__foreach_div_out <- function(out, self, other, scalar, scalars) { args <- mget(x = c("out", "self", "other", "scalar", "scalars")) -expected_types <- list(out = "TensorList", self = "TensorList", other = "TensorList", - scalar = "Scalar", scalars = "ArrayRef") +expected_types <- list(out = "TensorList", self = "TensorList", other = c("TensorList", +"Tensor"), scalar = "Scalar", scalars = "ArrayRef") nd_args <- c("out", "self", "other", "scalar", "scalars") return_types <- list(list("void")) call_c_function( @@ -3817,6 +4036,40 @@ fun_type = 'namespace' } +#' @rdname torch__foreach_max +torch__foreach_max <- function(self) { + args <- mget(x = c("self")) +expected_types <- list(self = "TensorList") +nd_args <- "self" +return_types <- list(list('TensorList')) +call_c_function( +fun_name = '_foreach_max', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__foreach_max_out +torch__foreach_max_out <- function(out, self) { + args <- mget(x = c("out", "self")) +expected_types <- list(out = "TensorList", self = "TensorList") +nd_args <- c("out", "self") +return_types <- list(list("void")) +call_c_function( +fun_name = '_foreach_max_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__foreach_maximum torch__foreach_maximum <- function(self, other, scalar, scalars) { args <- mget(x = c("self", "other", "scalar", "scalars")) @@ -3928,8 +4181,8 @@ fun_type = 'namespace' #' @rdname torch__foreach_mul torch__foreach_mul <- function(self, other, scalar, scalars) { args <- mget(x = c("self", "other", "scalar", "scalars")) -expected_types <- list(self = "TensorList", other = "TensorList", scalar = "Scalar", - scalars = "ArrayRef") +expected_types <- list(self = "TensorList", other = c("TensorList", "Tensor"), + scalar = "Scalar", scalars = "ArrayRef") nd_args <- c("self", "other", "scalar", "scalars") return_types <- list(list('TensorList')) call_c_function( @@ -3946,8 +4199,8 @@ fun_type = 'namespace' #' @rdname torch__foreach_mul_ torch__foreach_mul_ <- function(self, other, scalar, scalars) { args <- mget(x = c("self", "other", "scalar", "scalars")) -expected_types <- list(self = "TensorList", other = "TensorList", scalar = "Scalar", - scalars = "ArrayRef") +expected_types <- list(self = "TensorList", other = c("TensorList", "Tensor"), + scalar = "Scalar", scalars = "ArrayRef") nd_args <- c("self", "other", "scalar", "scalars") return_types <- list(list("void")) call_c_function( @@ -3964,8 +4217,8 @@ fun_type = 'namespace' #' @rdname torch__foreach_mul_out torch__foreach_mul_out <- function(out, self, other, scalar, scalars) { args <- mget(x = c("out", "self", "other", "scalar", "scalars")) -expected_types <- list(out = "TensorList", self = "TensorList", other = "TensorList", - scalar = "Scalar", scalars = "ArrayRef") +expected_types <- list(out = 
"TensorList", self = "TensorList", other = c("TensorList", +"Tensor"), scalar = "Scalar", scalars = "ArrayRef") nd_args <- c("out", "self", "other", "scalar", "scalars") return_types <- list(list("void")) call_c_function( @@ -4031,9 +4284,9 @@ fun_type = 'namespace' #' @rdname torch__foreach_norm -torch__foreach_norm <- function(self, ord = 2L) { - args <- mget(x = c("self", "ord")) -expected_types <- list(self = "TensorList", ord = "Scalar") +torch__foreach_norm <- function(self, ord = 2L, dtype = NULL) { + args <- mget(x = c("self", "ord", "dtype")) +expected_types <- list(self = "TensorList", ord = "Scalar", dtype = "ScalarType") nd_args <- "self" return_types <- list(list('TensorList')) call_c_function( @@ -4048,9 +4301,10 @@ fun_type = 'namespace' #' @rdname torch__foreach_norm_out -torch__foreach_norm_out <- function(out, self, ord = 2L) { - args <- mget(x = c("out", "self", "ord")) -expected_types <- list(out = "TensorList", self = "TensorList", ord = "Scalar") +torch__foreach_norm_out <- function(out, self, ord = 2L, dtype = NULL) { + args <- mget(x = c("out", "self", "ord", "dtype")) +expected_types <- list(out = "TensorList", self = "TensorList", ord = "Scalar", + dtype = "ScalarType") nd_args <- c("out", "self") return_types <- list(list("void")) call_c_function( @@ -4064,6 +4318,60 @@ fun_type = 'namespace' } +#' @rdname torch__foreach_pow +torch__foreach_pow <- function(self, exponent) { + args <- mget(x = c("self", "exponent")) +expected_types <- list(self = c("TensorList", "Scalar"), exponent = c("TensorList", +"Scalar", "ArrayRef")) +nd_args <- c("self", "exponent") +return_types <- list(list('TensorList')) +call_c_function( +fun_name = '_foreach_pow', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__foreach_pow_ +torch__foreach_pow_ <- function(self, exponent) { + args <- mget(x = c("self", "exponent")) +expected_types <- list(self = "TensorList", exponent = c("TensorList", "Scalar", +"ArrayRef")) +nd_args <- c("self", "exponent") +return_types <- list(list("void")) +call_c_function( +fun_name = '_foreach_pow_', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__foreach_pow_out +torch__foreach_pow_out <- function(out, self, exponent) { + args <- mget(x = c("out", "self", "exponent")) +expected_types <- list(out = "TensorList", self = "TensorList", exponent = c("TensorList", +"Scalar", "ArrayRef")) +nd_args <- c("out", "self", "exponent") +return_types <- list(list("void")) +call_c_function( +fun_name = '_foreach_pow_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__foreach_reciprocal torch__foreach_reciprocal <- function(self) { args <- mget(x = c("self")) @@ -4217,6 +4525,57 @@ fun_type = 'namespace' } +#' @rdname torch__foreach_sign +torch__foreach_sign <- function(self) { + args <- mget(x = c("self")) +expected_types <- list(self = "TensorList") +nd_args <- "self" +return_types <- list(list('TensorList')) +call_c_function( +fun_name = '_foreach_sign', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__foreach_sign_ +torch__foreach_sign_ <- function(self) { + args <- mget(x = c("self")) +expected_types <- list(self = "TensorList") +nd_args <- "self" +return_types <- 
list(list("void")) +call_c_function( +fun_name = '_foreach_sign_', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__foreach_sign_out +torch__foreach_sign_out <- function(out, self) { + args <- mget(x = c("out", "self")) +expected_types <- list(out = "TensorList", self = "TensorList") +nd_args <- c("out", "self") +return_types <- list(list("void")) +call_c_function( +fun_name = '_foreach_sign_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__foreach_sin torch__foreach_sin <- function(self) { args <- mget(x = c("self")) @@ -4628,12 +4987,143 @@ fun_type = 'namespace' } +#' @rdname torch__functional_assert_async +torch__functional_assert_async <- function(self, assert_msg, dep_token) { + args <- mget(x = c("self", "assert_msg", "dep_token")) +expected_types <- list(self = "Tensor", assert_msg = "c10::string_view", dep_token = "Tensor") +nd_args <- c("self", "assert_msg", "dep_token") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_functional_assert_async', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__functional_assert_scalar +torch__functional_assert_scalar <- function(self, assert_msg, dep_token) { + args <- mget(x = c("self", "assert_msg", "dep_token")) +expected_types <- list(self = "Scalar", assert_msg = "c10::string_view", dep_token = "Tensor") +nd_args <- c("self", "assert_msg", "dep_token") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_functional_assert_scalar', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__functional_sym_constrain_range +torch__functional_sym_constrain_range <- function(size, min, max, dep_token) { + args <- mget(x = c("size", "min", "max", "dep_token")) +expected_types <- list(size = "Scalar", min = "int64_t", max = "int64_t", dep_token = "Tensor") +nd_args <- c("size", "min", "max", "dep_token") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_functional_sym_constrain_range', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__functional_sym_constrain_range_for_size +torch__functional_sym_constrain_range_for_size <- function(size, min, max, dep_token) { + args <- mget(x = c("size", "min", "max", "dep_token")) +expected_types <- list(size = "Scalar", min = "int64_t", max = "int64_t", dep_token = "Tensor") +nd_args <- c("size", "min", "max", "dep_token") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_functional_sym_constrain_range_for_size', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__fused_adagrad +torch__fused_adagrad <- function(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale = list(), found_inf = list()) { + args <- mget(x = c("self", "grads", "state_sums", "state_steps", "lr", "lr_decay", "weight_decay", "eps", "maximize", "grad_scale", "found_inf")) +expected_types <- list(self = "TensorList", grads = "TensorList", state_sums = "TensorList", + state_steps = "TensorList", lr = "double", lr_decay = "double", + 
weight_decay = "double", eps = "double", maximize = "bool", + grad_scale = "Tensor", found_inf = "Tensor") +nd_args <- c("self", "grads", "state_sums", "state_steps", "lr", "lr_decay", +"weight_decay", "eps", "maximize") +return_types <- list(list("TensorList", "TensorList", "TensorList", "TensorList")) +call_c_function( +fun_name = '_fused_adagrad', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__fused_adagrad_ +torch__fused_adagrad_ <- function(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale = list(), found_inf = list()) { + args <- mget(x = c("self", "grads", "state_sums", "state_steps", "lr", "lr_decay", "weight_decay", "eps", "maximize", "grad_scale", "found_inf")) +expected_types <- list(self = "TensorList", grads = "TensorList", state_sums = "TensorList", + state_steps = "TensorList", lr = "double", lr_decay = "double", + weight_decay = "double", eps = "double", maximize = "bool", + grad_scale = "Tensor", found_inf = "Tensor") +nd_args <- c("self", "grads", "state_sums", "state_steps", "lr", "lr_decay", +"weight_decay", "eps", "maximize") +return_types <- list(list("void")) +call_c_function( +fun_name = '_fused_adagrad_', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__fused_adagrad_out +torch__fused_adagrad_out <- function(out, self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale = list(), found_inf = list()) { + args <- mget(x = c("out", "self", "grads", "state_sums", "state_steps", "lr", "lr_decay", "weight_decay", "eps", "maximize", "grad_scale", "found_inf")) +expected_types <- list(out = "TensorList", self = "TensorList", grads = "TensorList", + state_sums = "TensorList", state_steps = "TensorList", lr = "double", + lr_decay = "double", weight_decay = "double", eps = "double", + maximize = "bool", grad_scale = "Tensor", found_inf = "Tensor") +nd_args <- c("out", "self", "grads", "state_sums", "state_steps", "lr", +"lr_decay", "weight_decay", "eps", "maximize") +return_types <- list(list("void")) +call_c_function( +fun_name = '_fused_adagrad_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__fused_adam torch__fused_adam <- function(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale = list(), found_inf = list()) { args <- mget(x = c("self", "grads", "exp_avgs", "exp_avg_sqs", "max_exp_avg_sqs", "state_steps", "lr", "beta1", "beta2", "weight_decay", "eps", "amsgrad", "maximize", "grad_scale", "found_inf")) expected_types <- list(self = "TensorList", grads = "TensorList", exp_avgs = "TensorList", exp_avg_sqs = "TensorList", max_exp_avg_sqs = "TensorList", - state_steps = "TensorList", lr = "double", beta1 = "double", + state_steps = "TensorList", lr = c("double", "Tensor"), beta1 = "double", beta2 = "double", weight_decay = "double", eps = "double", amsgrad = "bool", maximize = "bool", grad_scale = "Tensor", found_inf = "Tensor") @@ -4657,7 +5147,7 @@ torch__fused_adam_ <- function(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_s args <- mget(x = c("self", "grads", "exp_avgs", "exp_avg_sqs", "max_exp_avg_sqs", "state_steps", "lr", "beta1", "beta2", "weight_decay", "eps", "amsgrad", "maximize", "grad_scale", 
"found_inf")) expected_types <- list(self = "TensorList", grads = "TensorList", exp_avgs = "TensorList", exp_avg_sqs = "TensorList", max_exp_avg_sqs = "TensorList", - state_steps = "TensorList", lr = "double", beta1 = "double", + state_steps = "TensorList", lr = c("double", "Tensor"), beta1 = "double", beta2 = "double", weight_decay = "double", eps = "double", amsgrad = "bool", maximize = "bool", grad_scale = "Tensor", found_inf = "Tensor") @@ -4681,7 +5171,7 @@ torch__fused_adam_out <- function(out, self, grads, exp_avgs, exp_avg_sqs, max_e args <- mget(x = c("out", "self", "grads", "exp_avgs", "exp_avg_sqs", "max_exp_avg_sqs", "state_steps", "lr", "beta1", "beta2", "weight_decay", "eps", "amsgrad", "maximize", "grad_scale", "found_inf")) expected_types <- list(out = "TensorList", self = "TensorList", grads = "TensorList", exp_avgs = "TensorList", exp_avg_sqs = "TensorList", max_exp_avg_sqs = "TensorList", - state_steps = "TensorList", lr = "double", beta1 = "double", + state_steps = "TensorList", lr = c("double", "Tensor"), beta1 = "double", beta2 = "double", weight_decay = "double", eps = "double", amsgrad = "bool", maximize = "bool", grad_scale = "Tensor", found_inf = "Tensor") @@ -4705,7 +5195,7 @@ torch__fused_adamw <- function(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_s args <- mget(x = c("self", "grads", "exp_avgs", "exp_avg_sqs", "max_exp_avg_sqs", "state_steps", "lr", "beta1", "beta2", "weight_decay", "eps", "amsgrad", "maximize", "grad_scale", "found_inf")) expected_types <- list(self = "TensorList", grads = "TensorList", exp_avgs = "TensorList", exp_avg_sqs = "TensorList", max_exp_avg_sqs = "TensorList", - state_steps = "TensorList", lr = "double", beta1 = "double", + state_steps = "TensorList", lr = c("double", "Tensor"), beta1 = "double", beta2 = "double", weight_decay = "double", eps = "double", amsgrad = "bool", maximize = "bool", grad_scale = "Tensor", found_inf = "Tensor") @@ -4729,7 +5219,7 @@ torch__fused_adamw_ <- function(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_ args <- mget(x = c("self", "grads", "exp_avgs", "exp_avg_sqs", "max_exp_avg_sqs", "state_steps", "lr", "beta1", "beta2", "weight_decay", "eps", "amsgrad", "maximize", "grad_scale", "found_inf")) expected_types <- list(self = "TensorList", grads = "TensorList", exp_avgs = "TensorList", exp_avg_sqs = "TensorList", max_exp_avg_sqs = "TensorList", - state_steps = "TensorList", lr = "double", beta1 = "double", + state_steps = "TensorList", lr = c("double", "Tensor"), beta1 = "double", beta2 = "double", weight_decay = "double", eps = "double", amsgrad = "bool", maximize = "bool", grad_scale = "Tensor", found_inf = "Tensor") @@ -4753,7 +5243,7 @@ torch__fused_adamw_out <- function(out, self, grads, exp_avgs, exp_avg_sqs, max_ args <- mget(x = c("out", "self", "grads", "exp_avgs", "exp_avg_sqs", "max_exp_avg_sqs", "state_steps", "lr", "beta1", "beta2", "weight_decay", "eps", "amsgrad", "maximize", "grad_scale", "found_inf")) expected_types <- list(out = "TensorList", self = "TensorList", grads = "TensorList", exp_avgs = "TensorList", exp_avg_sqs = "TensorList", max_exp_avg_sqs = "TensorList", - state_steps = "TensorList", lr = "double", beta1 = "double", + state_steps = "TensorList", lr = c("double", "Tensor"), beta1 = "double", beta2 = "double", weight_decay = "double", eps = "double", amsgrad = "bool", maximize = "bool", grad_scale = "Tensor", found_inf = "Tensor") @@ -4877,10 +5367,11 @@ fun_type = 'namespace' #' @rdname torch__fused_sdp_choice -torch__fused_sdp_choice <- function(query, key, value, 
attn_mask = list(), dropout_p = 0L, is_causal = FALSE) { - args <- mget(x = c("query", "key", "value", "attn_mask", "dropout_p", "is_causal")) +torch__fused_sdp_choice <- function(query, key, value, attn_mask = list(), dropout_p = 0L, is_causal = FALSE, scale = NULL, enable_gqa = FALSE) { + args <- mget(x = c("query", "key", "value", "attn_mask", "dropout_p", "is_causal", "scale", "enable_gqa")) expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", attn_mask = "Tensor", - dropout_p = "double", is_causal = "bool") + dropout_p = "double", is_causal = "bool", scale = "double", + enable_gqa = "bool") nd_args <- c("query", "key", "value") return_types <- list(list('int64_t')) call_c_function( @@ -4894,6 +5385,71 @@ fun_type = 'namespace' } +#' @rdname torch__fused_sgd +torch__fused_sgd <- function(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale = list(), found_inf = list()) { + args <- mget(x = c("self", "grads", "momentum_buffer_list", "weight_decay", "momentum", "lr", "dampening", "nesterov", "maximize", "is_first_step", "grad_scale", "found_inf")) +expected_types <- list(self = "TensorList", grads = "TensorList", momentum_buffer_list = "TensorList", + weight_decay = "double", momentum = "double", lr = c("double", + "Tensor"), dampening = "double", nesterov = "bool", maximize = "bool", + is_first_step = "bool", grad_scale = "Tensor", found_inf = "Tensor") +nd_args <- c("self", "grads", "momentum_buffer_list", "weight_decay", "momentum", +"lr", "dampening", "nesterov", "maximize", "is_first_step") +return_types <- list(list("TensorList", "TensorList", "TensorList")) +call_c_function( +fun_name = '_fused_sgd', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__fused_sgd_ +torch__fused_sgd_ <- function(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale = list(), found_inf = list()) { + args <- mget(x = c("self", "grads", "momentum_buffer_list", "weight_decay", "momentum", "lr", "dampening", "nesterov", "maximize", "is_first_step", "grad_scale", "found_inf")) +expected_types <- list(self = "TensorList", grads = "TensorList", momentum_buffer_list = "TensorList", + weight_decay = "double", momentum = "double", lr = c("double", + "Tensor"), dampening = "double", nesterov = "bool", maximize = "bool", + is_first_step = "bool", grad_scale = "Tensor", found_inf = "Tensor") +nd_args <- c("self", "grads", "momentum_buffer_list", "weight_decay", "momentum", +"lr", "dampening", "nesterov", "maximize", "is_first_step") +return_types <- list(list("void")) +call_c_function( +fun_name = '_fused_sgd_', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__fused_sgd_out +torch__fused_sgd_out <- function(out, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale = list(), found_inf = list()) { + args <- mget(x = c("out", "self", "grads", "momentum_buffer_list", "weight_decay", "momentum", "lr", "dampening", "nesterov", "maximize", "is_first_step", "grad_scale", "found_inf")) +expected_types <- list(out = "TensorList", self = "TensorList", grads = "TensorList", + momentum_buffer_list = "TensorList", weight_decay = "double", + momentum = "double", lr = c("double", "Tensor"), dampening = "double", + 
nesterov = "bool", maximize = "bool", is_first_step = "bool", + grad_scale = "Tensor", found_inf = "Tensor") +nd_args <- c("out", "self", "grads", "momentum_buffer_list", "weight_decay", +"momentum", "lr", "dampening", "nesterov", "maximize", "is_first_step" +) +return_types <- list(list("void")) +call_c_function( +fun_name = '_fused_sgd_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__fw_primal_copy torch__fw_primal_copy <- function(self, level) { args <- mget(x = c("self", "level")) @@ -5148,7 +5704,7 @@ fun_type = 'namespace' #' @rdname torch__index_put_impl torch__index_put_impl <- function(self, indices, values, accumulate = FALSE, unsafe = FALSE) { args <- mget(x = c("self", "indices", "values", "accumulate", "unsafe")) -expected_types <- list(self = "Tensor", indices = "const c10::List> &", +expected_types <- list(self = "Tensor", indices = "const c10::List<::std::optional> &", values = "Tensor", accumulate = "bool", unsafe = "bool") nd_args <- c("self", "indices", "values") return_types <- list(list('Tensor')) @@ -5166,7 +5722,7 @@ fun_type = 'namespace' #' @rdname torch__index_put_impl_ torch__index_put_impl_ <- function(self, indices, values, accumulate = FALSE, unsafe = FALSE) { args <- mget(x = c("self", "indices", "values", "accumulate", "unsafe")) -expected_types <- list(self = "Tensor", indices = "const c10::List> &", +expected_types <- list(self = "Tensor", indices = "const c10::List<::std::optional> &", values = "Tensor", accumulate = "bool", unsafe = "bool") nd_args <- c("self", "indices", "values") return_types <- list(list('Tensor')) @@ -5184,7 +5740,7 @@ fun_type = 'namespace' #' @rdname torch__index_put_impl_out torch__index_put_impl_out <- function(out, self, indices, values, accumulate = FALSE, unsafe = FALSE) { args <- mget(x = c("out", "self", "indices", "values", "accumulate", "unsafe")) -expected_types <- list(out = "Tensor", self = "Tensor", indices = "const c10::List> &", +expected_types <- list(out = "Tensor", self = "Tensor", indices = "const c10::List<::std::optional> &", values = "Tensor", accumulate = "bool", unsafe = "bool") nd_args <- c("out", "self", "indices", "values") return_types <- list(list('Tensor')) @@ -5233,6 +5789,40 @@ fun_type = 'namespace' } +#' @rdname torch__int_mm +torch__int_mm <- function(self, mat2) { + args <- mget(x = c("self", "mat2")) +expected_types <- list(self = "Tensor", mat2 = "Tensor") +nd_args <- c("self", "mat2") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_int_mm', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__int_mm_out +torch__int_mm_out <- function(out, self, mat2) { + args <- mget(x = c("out", "self", "mat2")) +expected_types <- list(out = "Tensor", self = "Tensor", mat2 = "Tensor") +nd_args <- c("out", "self", "mat2") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_int_mm_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__is_all_true torch__is_all_true <- function(self) { args <- mget(x = c("self")) @@ -5284,6 +5874,41 @@ fun_type = 'namespace' } +#' @rdname torch__jagged_to_padded_dense_forward +torch__jagged_to_padded_dense_forward <- function(values, offsets, max_lengths, padding_value = 0L) { + args <- mget(x = c("values", "offsets", "max_lengths", 
"padding_value")) +expected_types <- list(values = "Tensor", offsets = "TensorList", max_lengths = "IntArrayRef", + padding_value = "double") +nd_args <- c("values", "offsets", "max_lengths") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_jagged_to_padded_dense_forward', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__lazy_clone +torch__lazy_clone <- function(self) { + args <- mget(x = c("self")) +expected_types <- list(self = "Tensor") +nd_args <- "self" +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_lazy_clone', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__linalg_check_errors torch__linalg_check_errors <- function(info, api_name, is_matrix) { args <- mget(x = c("info", "api_name", "is_matrix")) @@ -5370,6 +5995,23 @@ fun_type = 'namespace' } +#' @rdname torch__linalg_eigvals +torch__linalg_eigvals <- function(self) { + args <- mget(x = c("self")) +expected_types <- list(self = "Tensor") +nd_args <- "self" +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_linalg_eigvals', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__linalg_slogdet torch__linalg_slogdet <- function(A) { args <- mget(x = c("A")) @@ -5883,6 +6525,24 @@ fun_type = 'namespace' } +#' @rdname torch__mixed_dtypes_linear +torch__mixed_dtypes_linear <- function(input, weight, scale, bias = list(), activation = NULL) { + args <- mget(x = c("input", "weight", "scale", "bias", "activation")) +expected_types <- list(input = "Tensor", weight = "Tensor", scale = "Tensor", bias = "Tensor", + activation = "c10::string_view") +nd_args <- c("input", "weight", "scale") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_mixed_dtypes_linear', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__mkldnn_reshape torch__mkldnn_reshape <- function(self, shape) { args <- mget(x = c("self", "shape")) @@ -6087,18 +6747,16 @@ fun_type = 'namespace' } -#' @rdname torch__native_batch_norm_legit_out -torch__native_batch_norm_legit_out <- function(out, save_mean, save_invstd, input, weight, bias, running_mean, running_var, training, momentum, eps) { - args <- mget(x = c("out", "save_mean", "save_invstd", "input", "weight", "bias", "running_mean", "running_var", "training", "momentum", "eps")) -expected_types <- list(out = "Tensor", save_mean = "Tensor", save_invstd = "Tensor", - input = "Tensor", weight = "Tensor", bias = "Tensor", running_mean = "Tensor", - running_var = "Tensor", training = "bool", momentum = "double", - eps = "double") -nd_args <- c("out", "save_mean", "save_invstd", "input", "weight", "bias", -"running_mean", "running_var", "training", "momentum", "eps") +#' @rdname torch__native_batch_norm_legit_no_training +torch__native_batch_norm_legit_no_training <- function(input, weight, bias, running_mean, running_var, momentum, eps) { + args <- mget(x = c("input", "weight", "bias", "running_mean", "running_var", "momentum", "eps")) +expected_types <- list(input = "Tensor", weight = "Tensor", bias = "Tensor", running_mean = "Tensor", + running_var = "Tensor", momentum = "double", eps = "double") +nd_args <- c("input", "weight", "bias", "running_mean", 
"running_var", "momentum", +"eps") return_types <- list(list("Tensor", "Tensor", "Tensor")) call_c_function( -fun_name = '_native_batch_norm_legit_out', +fun_name = '_native_batch_norm_legit_no_training', args = args, expected_types = expected_types, nd_args = nd_args, @@ -6108,19 +6766,17 @@ fun_type = 'namespace' } -#' @rdname torch__native_decoder_only_multi_head_attention -torch__native_decoder_only_multi_head_attention <- function(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask = list(), incr_key = list(), incr_value = list(), need_weights = TRUE, average_attn_weights = TRUE) { - args <- mget(x = c("query", "key", "value", "embed_dim", "num_head", "qkv_weight", "qkv_bias", "proj_weight", "proj_bias", "mask", "incr_key", "incr_value", "need_weights", "average_attn_weights")) -expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", embed_dim = "int64_t", - num_head = "int64_t", qkv_weight = "Tensor", qkv_bias = "Tensor", - proj_weight = "Tensor", proj_bias = "Tensor", mask = "Tensor", - incr_key = "Tensor", incr_value = "Tensor", need_weights = "bool", - average_attn_weights = "bool") -nd_args <- c("query", "key", "value", "embed_dim", "num_head", "qkv_weight", -"qkv_bias", "proj_weight", "proj_bias") -return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor")) +#' @rdname torch__native_batch_norm_legit_no_training_out +torch__native_batch_norm_legit_no_training_out <- function(out0, out1, out2, input, weight, bias, running_mean, running_var, momentum, eps) { + args <- mget(x = c("out0", "out1", "out2", "input", "weight", "bias", "running_mean", "running_var", "momentum", "eps")) +expected_types <- list(out0 = "Tensor", out1 = "Tensor", out2 = "Tensor", input = "Tensor", + weight = "Tensor", bias = "Tensor", running_mean = "Tensor", + running_var = "Tensor", momentum = "double", eps = "double") +nd_args <- c("out0", "out1", "out2", "input", "weight", "bias", "running_mean", +"running_var", "momentum", "eps") +return_types <- list(list("Tensor", "Tensor", "Tensor")) call_c_function( -fun_name = '_native_decoder_only_multi_head_attention', +fun_name = '_native_batch_norm_legit_no_training_out', args = args, expected_types = expected_types, nd_args = nd_args, @@ -6130,21 +6786,18 @@ fun_type = 'namespace' } -#' @rdname torch__native_decoder_only_multi_head_attention_out -torch__native_decoder_only_multi_head_attention_out <- function(out0, out1, out2, out3, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask = list(), incr_key = list(), incr_value = list(), need_weights = TRUE, average_attn_weights = TRUE) { - args <- mget(x = c("out0", "out1", "out2", "out3", "query", "key", "value", "embed_dim", "num_head", "qkv_weight", "qkv_bias", "proj_weight", "proj_bias", "mask", "incr_key", "incr_value", "need_weights", "average_attn_weights")) -expected_types <- list(out0 = "Tensor", out1 = "Tensor", out2 = "Tensor", out3 = "Tensor", - query = "Tensor", key = "Tensor", value = "Tensor", embed_dim = "int64_t", - num_head = "int64_t", qkv_weight = "Tensor", qkv_bias = "Tensor", - proj_weight = "Tensor", proj_bias = "Tensor", mask = "Tensor", - incr_key = "Tensor", incr_value = "Tensor", need_weights = "bool", - average_attn_weights = "bool") -nd_args <- c("out0", "out1", "out2", "out3", "query", "key", "value", "embed_dim", -"num_head", "qkv_weight", "qkv_bias", "proj_weight", "proj_bias" -) -return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor")) +#' @rdname 
torch__native_batch_norm_legit_out +torch__native_batch_norm_legit_out <- function(out, save_mean, save_invstd, input, weight, bias, running_mean, running_var, training, momentum, eps) { + args <- mget(x = c("out", "save_mean", "save_invstd", "input", "weight", "bias", "running_mean", "running_var", "training", "momentum", "eps")) +expected_types <- list(out = "Tensor", save_mean = "Tensor", save_invstd = "Tensor", + input = "Tensor", weight = "Tensor", bias = "Tensor", running_mean = "Tensor", + running_var = "Tensor", training = "bool", momentum = "double", + eps = "double") +nd_args <- c("out", "save_mean", "save_invstd", "input", "weight", "bias", +"running_mean", "running_var", "training", "momentum", "eps") +return_types <- list(list("Tensor", "Tensor", "Tensor")) call_c_function( -fun_name = '_native_decoder_only_multi_head_attention_out', +fun_name = '_native_batch_norm_legit_out', args = args, expected_types = expected_types, nd_args = nd_args, @@ -6248,6 +6901,23 @@ fun_type = 'namespace' } +#' @rdname torch__nested_compute_contiguous_strides_offsets +torch__nested_compute_contiguous_strides_offsets <- function(nested_size) { + args <- mget(x = c("nested_size")) +expected_types <- list(nested_size = "Tensor") +nd_args <- "nested_size" +return_types <- list(list("Tensor", "Tensor")) +call_c_function( +fun_name = '_nested_compute_contiguous_strides_offsets', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__nested_from_padded torch__nested_from_padded <- function(padded, cpu_nested_shape_example, fuse_transform_0213 = FALSE) { args <- mget(x = c("padded", "cpu_nested_shape_example", "fuse_transform_0213")) @@ -6318,6 +6988,159 @@ fun_type = 'namespace' } +#' @rdname torch__nested_get_jagged_dummy +torch__nested_get_jagged_dummy <- function(any) { + args <- mget(x = c("any")) +expected_types <- list(any = "Tensor") +nd_args <- "any" +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_nested_get_jagged_dummy', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__nested_get_lengths +torch__nested_get_lengths <- function(self) { + args <- mget(x = c("self")) +expected_types <- list(self = "Tensor") +nd_args <- "self" +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_nested_get_lengths', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__nested_get_max_seqlen +torch__nested_get_max_seqlen <- function(self) { + args <- mget(x = c("self")) +expected_types <- list(self = "Tensor") +nd_args <- "self" +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_nested_get_max_seqlen', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__nested_get_min_seqlen +torch__nested_get_min_seqlen <- function(self) { + args <- mget(x = c("self")) +expected_types <- list(self = "Tensor") +nd_args <- "self" +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_nested_get_min_seqlen', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__nested_get_offsets +torch__nested_get_offsets <- function(self) { + args <- mget(x = c("self")) +expected_types <- 
list(self = "Tensor") +nd_args <- "self" +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_nested_get_offsets', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__nested_get_ragged_idx +torch__nested_get_ragged_idx <- function(self) { + args <- mget(x = c("self")) +expected_types <- list(self = "Tensor") +nd_args <- "self" +return_types <- list(list('int64_t')) +call_c_function( +fun_name = '_nested_get_ragged_idx', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__nested_get_values +torch__nested_get_values <- function(self) { + args <- mget(x = c("self")) +expected_types <- list(self = "Tensor") +nd_args <- "self" +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_nested_get_values', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__nested_get_values_copy +torch__nested_get_values_copy <- function(self) { + args <- mget(x = c("self")) +expected_types <- list(self = "Tensor") +nd_args <- "self" +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_nested_get_values_copy', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__nested_get_values_copy_out +torch__nested_get_values_copy_out <- function(out, self) { + args <- mget(x = c("out", "self")) +expected_types <- list(out = "Tensor", self = "Tensor") +nd_args <- c("out", "self") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_nested_get_values_copy_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__nested_select_backward torch__nested_select_backward <- function(grad_output, self, dim, index) { args <- mget(x = c("grad_output", "self", "dim", "index")) @@ -6474,6 +7297,23 @@ fun_type = 'namespace' } +#' @rdname torch__nested_tensor_storage_offsets_out +torch__nested_tensor_storage_offsets_out <- function(out, self) { + args <- mget(x = c("out", "self")) +expected_types <- list(out = "Tensor", self = "Tensor") +nd_args <- c("out", "self") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_nested_tensor_storage_offsets_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__nested_tensor_strides_out torch__nested_tensor_strides_out <- function(out, self) { args <- mget(x = c("out", "self")) @@ -6495,7 +7335,7 @@ fun_type = 'namespace' torch__nested_view_from_buffer <- function(self, nested_size, nested_strides, offsets) { args <- mget(x = c("self", "nested_size", "nested_strides", "offsets")) expected_types <- list(self = "Tensor", nested_size = "Tensor", nested_strides = "Tensor", - offsets = "IntArrayRef") + offsets = "Tensor") nd_args <- c("self", "nested_size", "nested_strides", "offsets") return_types <- list(list('Tensor')) call_c_function( @@ -6513,7 +7353,7 @@ fun_type = 'namespace' torch__nested_view_from_buffer_copy <- function(self, nested_size, nested_strides, offsets) { args <- mget(x = c("self", "nested_size", "nested_strides", "offsets")) expected_types <- list(self = "Tensor", nested_size = "Tensor", nested_strides = "Tensor", - offsets 
= "IntArrayRef") + offsets = "Tensor") nd_args <- c("self", "nested_size", "nested_strides", "offsets") return_types <- list(list('Tensor')) call_c_function( @@ -6531,7 +7371,7 @@ fun_type = 'namespace' torch__nested_view_from_buffer_copy_out <- function(out, self, nested_size, nested_strides, offsets) { args <- mget(x = c("out", "self", "nested_size", "nested_strides", "offsets")) expected_types <- list(out = "Tensor", self = "Tensor", nested_size = "Tensor", - nested_strides = "Tensor", offsets = "IntArrayRef") + nested_strides = "Tensor", offsets = "Tensor") nd_args <- c("out", "self", "nested_size", "nested_strides", "offsets") return_types <- list(list('Tensor')) call_c_function( @@ -6545,6 +7385,61 @@ fun_type = 'namespace' } +#' @rdname torch__nested_view_from_jagged +torch__nested_view_from_jagged <- function(self, offsets, dummy, lengths = list(), ragged_idx = 1L, min_seqlen = list(), max_seqlen = list()) { + args <- mget(x = c("self", "offsets", "dummy", "lengths", "ragged_idx", "min_seqlen", "max_seqlen")) +expected_types <- list(self = "Tensor", offsets = "Tensor", dummy = "Tensor", lengths = "Tensor", + ragged_idx = "int64_t", min_seqlen = "Tensor", max_seqlen = "Tensor") +nd_args <- c("self", "offsets", "dummy") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_nested_view_from_jagged', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__nested_view_from_jagged_copy +torch__nested_view_from_jagged_copy <- function(self, offsets, dummy, lengths = list(), ragged_idx = 1L, min_seqlen = list(), max_seqlen = list()) { + args <- mget(x = c("self", "offsets", "dummy", "lengths", "ragged_idx", "min_seqlen", "max_seqlen")) +expected_types <- list(self = "Tensor", offsets = "Tensor", dummy = "Tensor", lengths = "Tensor", + ragged_idx = "int64_t", min_seqlen = "Tensor", max_seqlen = "Tensor") +nd_args <- c("self", "offsets", "dummy") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_nested_view_from_jagged_copy', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__nested_view_from_jagged_copy_out +torch__nested_view_from_jagged_copy_out <- function(out, self, offsets, dummy, lengths = list(), ragged_idx = 1L, min_seqlen = list(), max_seqlen = list()) { + args <- mget(x = c("out", "self", "offsets", "dummy", "lengths", "ragged_idx", "min_seqlen", "max_seqlen")) +expected_types <- list(out = "Tensor", self = "Tensor", offsets = "Tensor", dummy = "Tensor", + lengths = "Tensor", ragged_idx = "int64_t", min_seqlen = "Tensor", + max_seqlen = "Tensor") +nd_args <- c("out", "self", "offsets", "dummy") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_nested_view_from_jagged_copy_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__new_zeros_with_same_feature_meta torch__new_zeros_with_same_feature_meta <- function(self, other, self_num_batch_dims = 0L) { args <- mget(x = c("self", "other", "self_num_batch_dims")) @@ -6722,6 +7617,23 @@ fun_type = 'namespace' } +#' @rdname torch__padded_dense_to_jagged_forward +torch__padded_dense_to_jagged_forward <- function(dense, offsets, total_L = NULL) { + args <- mget(x = c("dense", "offsets", "total_L")) +expected_types <- list(dense = "Tensor", offsets = "TensorList", total_L = "int64_t") +nd_args 
<- c("dense", "offsets") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_padded_dense_to_jagged_forward', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__pdist_backward torch__pdist_backward <- function(grad, self, p, pdist) { args <- mget(x = c("grad", "self", "p", "pdist")) @@ -6859,6 +7771,40 @@ fun_type = 'namespace' } +#' @rdname torch__print +torch__print <- function(s) { + args <- mget(x = c("s")) +expected_types <- list(s = "c10::string_view") +nd_args <- "s" +return_types <- list(list("void")) +call_c_function( +fun_name = '_print', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__propagate_xla_data +torch__propagate_xla_data <- function(input, output) { + args <- mget(x = c("input", "output")) +expected_types <- list(input = "Tensor", output = "Tensor") +nd_args <- c("input", "output") +return_types <- list(list("void")) +call_c_function( +fun_name = '_propagate_xla_data', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__remove_batch_dim torch__remove_batch_dim <- function(self, level, batch_size, out_dim) { args <- mget(x = c("self", "level", "batch_size", "out_dim")) @@ -7030,6 +7976,23 @@ fun_type = 'namespace' } +#' @rdname torch__safe_softmax +torch__safe_softmax <- function(self, dim, dtype = NULL) { + args <- mget(x = c("self", "dim", "dtype")) +expected_types <- list(self = "Tensor", dim = "int64_t", dtype = "ScalarType") +nd_args <- c("self", "dim") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_safe_softmax', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__sample_dirichlet torch__sample_dirichlet <- function(self, generator = NULL) { args <- mget(x = c("self", "generator")) @@ -7081,15 +8044,16 @@ fun_type = 'namespace' } -#' @rdname torch__scaled_dot_product_attention -torch__scaled_dot_product_attention <- function(query, key, value, attn_mask = list(), dropout_p = 0L, need_attn_weights = FALSE, is_causal = FALSE) { - args <- mget(x = c("query", "key", "value", "attn_mask", "dropout_p", "need_attn_weights", "is_causal")) +#' @rdname torch__scaled_dot_product_attention_math +torch__scaled_dot_product_attention_math <- function(query, key, value, attn_mask = list(), dropout_p = 0L, is_causal = FALSE, dropout_mask = list(), scale = NULL, enable_gqa = FALSE) { + args <- mget(x = c("query", "key", "value", "attn_mask", "dropout_p", "is_causal", "dropout_mask", "scale", "enable_gqa")) expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", attn_mask = "Tensor", - dropout_p = "double", need_attn_weights = "bool", is_causal = "bool") + dropout_p = "double", is_causal = "bool", dropout_mask = "Tensor", + scale = "double", enable_gqa = "bool") nd_args <- c("query", "key", "value") return_types <- list(list("Tensor", "Tensor")) call_c_function( -fun_name = '_scaled_dot_product_attention', +fun_name = '_scaled_dot_product_attention_math', args = args, expected_types = expected_types, nd_args = nd_args, @@ -7099,15 +8063,58 @@ fun_type = 'namespace' } -#' @rdname torch__scaled_dot_product_attention_math -torch__scaled_dot_product_attention_math <- function(query, key, value, attn_mask = list(), dropout_p = 0L, 
is_causal = FALSE, dropout_mask = list()) { - args <- mget(x = c("query", "key", "value", "attn_mask", "dropout_p", "is_causal", "dropout_mask")) +#' @rdname torch__scaled_dot_product_attention_math_for_mps +torch__scaled_dot_product_attention_math_for_mps <- function(query, key, value, attn_mask = list(), dropout_p = 0L, is_causal = FALSE, dropout_mask = list(), scale = NULL) { + args <- mget(x = c("query", "key", "value", "attn_mask", "dropout_p", "is_causal", "dropout_mask", "scale")) expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", attn_mask = "Tensor", - dropout_p = "double", is_causal = "bool", dropout_mask = "Tensor") + dropout_p = "double", is_causal = "bool", dropout_mask = "Tensor", + scale = "double") nd_args <- c("query", "key", "value") return_types <- list(list("Tensor", "Tensor")) call_c_function( -fun_name = '_scaled_dot_product_attention_math', +fun_name = '_scaled_dot_product_attention_math_for_mps', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__scaled_dot_product_cudnn_attention +torch__scaled_dot_product_cudnn_attention <- function(query, key, value, attn_bias, compute_log_sumexp, dropout_p = 0L, is_causal = FALSE, return_debug_mask = FALSE, scale = NULL) { + args <- mget(x = c("query", "key", "value", "attn_bias", "compute_log_sumexp", "dropout_p", "is_causal", "return_debug_mask", "scale")) +expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", attn_bias = "Tensor", + compute_log_sumexp = "bool", dropout_p = "double", is_causal = "bool", + return_debug_mask = "bool", scale = "double") +nd_args <- c("query", "key", "value", "attn_bias", "compute_log_sumexp") +return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor", "int64_t", "int64_t", "Tensor", "Tensor", "Tensor")) +call_c_function( +fun_name = '_scaled_dot_product_cudnn_attention', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__scaled_dot_product_cudnn_attention_backward +torch__scaled_dot_product_cudnn_attention_backward <- function(grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale = NULL) { + args <- mget(x = c("grad_out", "query", "key", "value", "out", "logsumexp", "philox_seed", "philox_offset", "attn_bias", "cum_seq_q", "cum_seq_k", "max_q", "max_k", "dropout_p", "is_causal", "scale")) +expected_types <- list(grad_out = "Tensor", query = "Tensor", key = "Tensor", value = "Tensor", + out = "Tensor", logsumexp = "Tensor", philox_seed = "Tensor", + philox_offset = "Tensor", attn_bias = "Tensor", cum_seq_q = "Tensor", + cum_seq_k = "Tensor", max_q = "int64_t", max_k = "int64_t", + dropout_p = "double", is_causal = "bool", scale = "double") +nd_args <- c("grad_out", "query", "key", "value", "out", "logsumexp", "philox_seed", +"philox_offset", "attn_bias", "cum_seq_q", "cum_seq_k", "max_q", +"max_k", "dropout_p", "is_causal") +return_types <- list(list("Tensor", "Tensor", "Tensor")) +call_c_function( +fun_name = '_scaled_dot_product_cudnn_attention_backward', args = args, expected_types = expected_types, nd_args = nd_args, @@ -7118,12 +8125,13 @@ fun_type = 'namespace' #' @rdname torch__scaled_dot_product_efficient_attention -torch__scaled_dot_product_efficient_attention <- function(query, key, value, compute_log_sumexp, is_causal = FALSE) { - args <- mget(x = 
c("query", "key", "value", "compute_log_sumexp", "is_causal")) -expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", compute_log_sumexp = "bool", - is_causal = "bool") -nd_args <- c("query", "key", "value", "compute_log_sumexp") -return_types <- list(list("Tensor", "Tensor")) +torch__scaled_dot_product_efficient_attention <- function(query, key, value, attn_bias, compute_log_sumexp, dropout_p = 0L, is_causal = FALSE, scale = NULL) { + args <- mget(x = c("query", "key", "value", "attn_bias", "compute_log_sumexp", "dropout_p", "is_causal", "scale")) +expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", attn_bias = "Tensor", + compute_log_sumexp = "bool", dropout_p = "double", is_causal = "bool", + scale = "double") +nd_args <- c("query", "key", "value", "attn_bias", "compute_log_sumexp") +return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor")) call_c_function( fun_name = '_scaled_dot_product_efficient_attention', args = args, @@ -7136,13 +8144,17 @@ fun_type = 'namespace' #' @rdname torch__scaled_dot_product_efficient_attention_backward -torch__scaled_dot_product_efficient_attention_backward <- function(grad_out_, query, key, value, out, logsumexp, is_causal = FALSE, chunk_grad_outputs = FALSE) { - args <- mget(x = c("grad_out_", "query", "key", "value", "out", "logsumexp", "is_causal", "chunk_grad_outputs")) +torch__scaled_dot_product_efficient_attention_backward <- function(grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal = FALSE, scale = NULL) { + args <- mget(x = c("grad_out_", "query", "key", "value", "attn_bias", "out", "logsumexp", "philox_seed", "philox_offset", "dropout_p", "grad_input_mask", "is_causal", "scale")) expected_types <- list(grad_out_ = "Tensor", query = "Tensor", key = "Tensor", - value = "Tensor", out = "Tensor", logsumexp = "Tensor", is_causal = "bool", - chunk_grad_outputs = "bool") -nd_args <- c("grad_out_", "query", "key", "value", "out", "logsumexp") -return_types <- list(list("Tensor", "Tensor", "Tensor")) + value = "Tensor", attn_bias = "Tensor", out = "Tensor", logsumexp = "Tensor", + philox_seed = "Tensor", philox_offset = "Tensor", dropout_p = "double", + grad_input_mask = "::std::array", is_causal = "bool", + scale = "double") +nd_args <- c("grad_out_", "query", "key", "value", "attn_bias", "out", "logsumexp", +"philox_seed", "philox_offset", "dropout_p", "grad_input_mask" +) +return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor")) call_c_function( fun_name = '_scaled_dot_product_efficient_attention_backward', args = args, @@ -7155,12 +8167,12 @@ fun_type = 'namespace' #' @rdname torch__scaled_dot_product_flash_attention -torch__scaled_dot_product_flash_attention <- function(query, key, value, dropout_p = 0L, is_causal = FALSE, return_debug_mask = FALSE) { - args <- mget(x = c("query", "key", "value", "dropout_p", "is_causal", "return_debug_mask")) +torch__scaled_dot_product_flash_attention <- function(query, key, value, dropout_p = 0L, is_causal = FALSE, return_debug_mask = FALSE, scale = NULL) { + args <- mget(x = c("query", "key", "value", "dropout_p", "is_causal", "return_debug_mask", "scale")) expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", dropout_p = "double", - is_causal = "bool", return_debug_mask = "bool") + is_causal = "bool", return_debug_mask = "bool", scale = "double") nd_args <- c("query", "key", "value") -return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor", 
"int64_t", "int64_t", "int64_t", "int64_t", "Tensor")) +return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor", "int64_t", "int64_t", "Tensor", "Tensor", "Tensor")) call_c_function( fun_name = '_scaled_dot_product_flash_attention', args = args, @@ -7173,13 +8185,13 @@ fun_type = 'namespace' #' @rdname torch__scaled_dot_product_flash_attention_backward -torch__scaled_dot_product_flash_attention_backward <- function(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset) { - args <- mget(x = c("grad_out", "query", "key", "value", "out", "logsumexp", "cum_seq_q", "cum_seq_k", "max_q", "max_k", "dropout_p", "is_causal", "philox_seed", "philox_offset")) +torch__scaled_dot_product_flash_attention_backward <- function(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale = NULL) { + args <- mget(x = c("grad_out", "query", "key", "value", "out", "logsumexp", "cum_seq_q", "cum_seq_k", "max_q", "max_k", "dropout_p", "is_causal", "philox_seed", "philox_offset", "scale")) expected_types <- list(grad_out = "Tensor", query = "Tensor", key = "Tensor", value = "Tensor", out = "Tensor", logsumexp = "Tensor", cum_seq_q = "Tensor", cum_seq_k = "Tensor", max_q = "int64_t", max_k = "int64_t", - dropout_p = "double", is_causal = "bool", philox_seed = "int64_t", - philox_offset = "int64_t") + dropout_p = "double", is_causal = "bool", philox_seed = "Tensor", + philox_offset = "Tensor", scale = "double") nd_args <- c("grad_out", "query", "key", "value", "out", "logsumexp", "cum_seq_q", "cum_seq_k", "max_q", "max_k", "dropout_p", "is_causal", "philox_seed", "philox_offset") @@ -7195,6 +8207,125 @@ fun_type = 'namespace' } +#' @rdname torch__scaled_dot_product_flash_attention_for_cpu +torch__scaled_dot_product_flash_attention_for_cpu <- function(query, key, value, dropout_p = 0L, is_causal = FALSE, attn_mask = list(), scale = NULL) { + args <- mget(x = c("query", "key", "value", "dropout_p", "is_causal", "attn_mask", "scale")) +expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", dropout_p = "double", + is_causal = "bool", attn_mask = "Tensor", scale = "double") +nd_args <- c("query", "key", "value") +return_types <- list(list("Tensor", "Tensor")) +call_c_function( +fun_name = '_scaled_dot_product_flash_attention_for_cpu', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__scaled_dot_product_flash_attention_for_cpu_backward +torch__scaled_dot_product_flash_attention_for_cpu_backward <- function(grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask = list(), scale = NULL) { + args <- mget(x = c("grad_out", "query", "key", "value", "out", "logsumexp", "dropout_p", "is_causal", "attn_mask", "scale")) +expected_types <- list(grad_out = "Tensor", query = "Tensor", key = "Tensor", value = "Tensor", + out = "Tensor", logsumexp = "Tensor", dropout_p = "double", + is_causal = "bool", attn_mask = "Tensor", scale = "double") +nd_args <- c("grad_out", "query", "key", "value", "out", "logsumexp", "dropout_p", +"is_causal") +return_types <- list(list("Tensor", "Tensor", "Tensor")) +call_c_function( +fun_name = '_scaled_dot_product_flash_attention_for_cpu_backward', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname 
torch__scaled_dot_product_fused_attention_overrideable +torch__scaled_dot_product_fused_attention_overrideable <- function(query, key, value, attn_bias = list(), dropout_p = 0L, is_causal = FALSE, return_debug_mask = FALSE, scale = NULL) { + args <- mget(x = c("query", "key", "value", "attn_bias", "dropout_p", "is_causal", "return_debug_mask", "scale")) +expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", attn_bias = "Tensor", + dropout_p = "double", is_causal = "bool", return_debug_mask = "bool", + scale = "double") +nd_args <- c("query", "key", "value") +return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor", "int64_t", "int64_t", "Tensor", "Tensor", "Tensor")) +call_c_function( +fun_name = '_scaled_dot_product_fused_attention_overrideable', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__scaled_dot_product_fused_attention_overrideable_backward +torch__scaled_dot_product_fused_attention_overrideable_backward <- function(grad_out, query, key, value, attn_bias, grad_input_mask, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale = NULL) { + args <- mget(x = c("grad_out", "query", "key", "value", "attn_bias", "grad_input_mask", "out", "logsumexp", "cum_seq_q", "cum_seq_k", "max_q", "max_k", "dropout_p", "is_causal", "philox_seed", "philox_offset", "scale")) +expected_types <- list(grad_out = "Tensor", query = "Tensor", key = "Tensor", value = "Tensor", + attn_bias = "Tensor", grad_input_mask = "::std::array", + out = "Tensor", logsumexp = "Tensor", cum_seq_q = "Tensor", + cum_seq_k = "Tensor", max_q = "int64_t", max_k = "int64_t", + dropout_p = "double", is_causal = "bool", philox_seed = "Tensor", + philox_offset = "Tensor", scale = "double") +nd_args <- c("grad_out", "query", "key", "value", "attn_bias", "grad_input_mask", +"out", "logsumexp", "cum_seq_q", "cum_seq_k", "max_q", "max_k", +"dropout_p", "is_causal", "philox_seed", "philox_offset") +return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor")) +call_c_function( +fun_name = '_scaled_dot_product_fused_attention_overrideable_backward', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__scaled_mm +torch__scaled_mm <- function(self, mat2, scale_a, scale_b, bias = list(), scale_result = list(), out_dtype = NULL, use_fast_accum = FALSE) { + args <- mget(x = c("self", "mat2", "scale_a", "scale_b", "bias", "scale_result", "out_dtype", "use_fast_accum")) +expected_types <- list(self = "Tensor", mat2 = "Tensor", scale_a = "Tensor", scale_b = "Tensor", + bias = "Tensor", scale_result = "Tensor", out_dtype = "ScalarType", + use_fast_accum = "bool") +nd_args <- c("self", "mat2", "scale_a", "scale_b") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_scaled_mm', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__scaled_mm_out +torch__scaled_mm_out <- function(out, self, mat2, scale_a, scale_b, bias = list(), scale_result = list(), out_dtype = NULL, use_fast_accum = FALSE) { + args <- mget(x = c("out", "self", "mat2", "scale_a", "scale_b", "bias", "scale_result", "out_dtype", "use_fast_accum")) +expected_types <- list(out = "Tensor", self = "Tensor", mat2 = "Tensor", scale_a = "Tensor", + scale_b = "Tensor", bias = "Tensor", 
scale_result = "Tensor", + out_dtype = "ScalarType", use_fast_accum = "bool") +nd_args <- c("out", "self", "mat2", "scale_a", "scale_b") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_scaled_mm_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__segment_reduce_backward torch__segment_reduce_backward <- function(grad, output, data, reduce, lengths = list(), offsets = list(), axis = 0L, initial = NULL) { args <- mget(x = c("grad", "output", "data", "reduce", "lengths", "offsets", "axis", "initial")) @@ -7613,11 +8744,30 @@ fun_type = 'namespace' } +#' @rdname torch__sparse_compressed_tensor_with_dims +torch__sparse_compressed_tensor_with_dims <- function(nnz, dense_dim, size, blocksize, index_dtype, options) { + args <- mget(x = c("nnz", "dense_dim", "size", "blocksize", "index_dtype", "options")) +expected_types <- list(nnz = "int64_t", dense_dim = "int64_t", size = "IntArrayRef", + blocksize = "IntArrayRef", index_dtype = "ScalarType", options = "TensorOptions") +nd_args <- c("nnz", "dense_dim", "size", "blocksize", "index_dtype", "options" +) +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_sparse_compressed_tensor_with_dims', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__sparse_coo_tensor_unsafe -torch__sparse_coo_tensor_unsafe <- function(indices, values, size, options = list()) { - args <- mget(x = c("indices", "values", "size", "options")) +torch__sparse_coo_tensor_unsafe <- function(indices, values, size, options = list(), is_coalesced = NULL) { + args <- mget(x = c("indices", "values", "size", "options", "is_coalesced")) expected_types <- list(indices = "Tensor", values = "Tensor", size = "IntArrayRef", - options = "TensorOptions") + options = "TensorOptions", is_coalesced = "bool") nd_args <- c("indices", "values", "size") return_types <- list(list('Tensor')) call_c_function( @@ -7650,10 +8800,11 @@ fun_type = 'namespace' #' @rdname torch__sparse_coo_tensor_with_dims_and_tensors -torch__sparse_coo_tensor_with_dims_and_tensors <- function(sparse_dim, dense_dim, size, indices, values, options) { - args <- mget(x = c("sparse_dim", "dense_dim", "size", "indices", "values", "options")) +torch__sparse_coo_tensor_with_dims_and_tensors <- function(sparse_dim, dense_dim, size, indices, values, options, is_coalesced = NULL) { + args <- mget(x = c("sparse_dim", "dense_dim", "size", "indices", "values", "options", "is_coalesced")) expected_types <- list(sparse_dim = "int64_t", dense_dim = "int64_t", size = "IntArrayRef", - indices = "Tensor", values = "Tensor", options = "TensorOptions") + indices = "Tensor", values = "Tensor", options = "TensorOptions", + is_coalesced = "bool") nd_args <- c("sparse_dim", "dense_dim", "size", "indices", "values", "options" ) return_types <- list(list('Tensor')) @@ -7669,10 +8820,11 @@ fun_type = 'namespace' #' @rdname torch__sparse_coo_tensor_with_dims_and_tensors_out -torch__sparse_coo_tensor_with_dims_and_tensors_out <- function(out, sparse_dim, dense_dim, size, indices, values) { - args <- mget(x = c("out", "sparse_dim", "dense_dim", "size", "indices", "values")) +torch__sparse_coo_tensor_with_dims_and_tensors_out <- function(out, sparse_dim, dense_dim, size, indices, values, is_coalesced = NULL) { + args <- mget(x = c("out", "sparse_dim", "dense_dim", "size", "indices", "values", "is_coalesced")) 
expected_types <- list(out = "Tensor", sparse_dim = "int64_t", dense_dim = "int64_t", - size = "IntArrayRef", indices = "Tensor", values = "Tensor") + size = "IntArrayRef", indices = "Tensor", values = "Tensor", + is_coalesced = "bool") nd_args <- c("out", "sparse_dim", "dense_dim", "size", "indices", "values" ) return_types <- list(list('Tensor')) @@ -7884,6 +9036,23 @@ fun_type = 'namespace' } +#' @rdname torch__sparse_mask_projection_out +torch__sparse_mask_projection_out <- function(out, self, mask, accumulate_matches = FALSE) { + args <- mget(x = c("out", "self", "mask", "accumulate_matches")) +expected_types <- list(out = "Tensor", self = "Tensor", mask = "Tensor", accumulate_matches = "bool") +nd_args <- c("out", "self", "mask") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_sparse_mask_projection_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__sparse_mm torch__sparse_mm <- function(sparse, dense, reduce) { args <- mget(x = c("sparse", "dense", "reduce")) @@ -7937,6 +9106,111 @@ fun_type = 'namespace' } +#' @rdname torch__sparse_semi_structured_addmm +torch__sparse_semi_structured_addmm <- function(input, mat1, mat1_meta, mat2, alpha = 1L, beta = 1L, out_dtype = NULL) { + args <- mget(x = c("input", "mat1", "mat1_meta", "mat2", "alpha", "beta", "out_dtype")) +expected_types <- list(input = "Tensor", mat1 = "Tensor", mat1_meta = "Tensor", + mat2 = "Tensor", alpha = "Scalar", beta = "Scalar", out_dtype = "ScalarType") +nd_args <- c("input", "mat1", "mat1_meta", "mat2") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_sparse_semi_structured_addmm', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__sparse_semi_structured_apply +torch__sparse_semi_structured_apply <- function(input, thread_masks) { + args <- mget(x = c("input", "thread_masks")) +expected_types <- list(input = "Tensor", thread_masks = "Tensor") +nd_args <- c("input", "thread_masks") +return_types <- list(list("Tensor", "Tensor")) +call_c_function( +fun_name = '_sparse_semi_structured_apply', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__sparse_semi_structured_apply_dense +torch__sparse_semi_structured_apply_dense <- function(input, thread_masks) { + args <- mget(x = c("input", "thread_masks")) +expected_types <- list(input = "Tensor", thread_masks = "Tensor") +nd_args <- c("input", "thread_masks") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_sparse_semi_structured_apply_dense', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__sparse_semi_structured_linear +torch__sparse_semi_structured_linear <- function(input, weight, meta, bias = list(), activation = NULL, out_dtype = NULL) { + args <- mget(x = c("input", "weight", "meta", "bias", "activation", "out_dtype")) +expected_types <- list(input = "Tensor", weight = "Tensor", meta = "Tensor", bias = "Tensor", + activation = "c10::string_view", out_dtype = "ScalarType") +nd_args <- c("input", "weight", "meta") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_sparse_semi_structured_linear', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = 
return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__sparse_semi_structured_mm +torch__sparse_semi_structured_mm <- function(mat1, mat1_meta, mat2, out_dtype = NULL) { + args <- mget(x = c("mat1", "mat1_meta", "mat2", "out_dtype")) +expected_types <- list(mat1 = "Tensor", mat1_meta = "Tensor", mat2 = "Tensor", + out_dtype = "ScalarType") +nd_args <- c("mat1", "mat1_meta", "mat2") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_sparse_semi_structured_mm', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__sparse_semi_structured_tile +torch__sparse_semi_structured_tile <- function(input, algorithm = "", use_cutlass = TRUE) { + args <- mget(x = c("input", "algorithm", "use_cutlass")) +expected_types <- list(input = "Tensor", algorithm = "c10::string_view", use_cutlass = "bool") +nd_args <- "input" +return_types <- list(list("Tensor", "Tensor", "Tensor", "Tensor", "Tensor")) +call_c_function( +fun_name = '_sparse_semi_structured_tile', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__sparse_softmax torch__sparse_softmax <- function(self, dim, dtype = NULL, half_to_float) { args <- mget(x = c("self", "dim", "dtype", "half_to_float")) @@ -8146,6 +9420,23 @@ fun_type = 'namespace' } +#' @rdname torch__spsolve +torch__spsolve <- function(A, B, left = TRUE) { + args <- mget(x = c("A", "B", "left")) +expected_types <- list(A = "Tensor", B = "Tensor", left = "bool") +nd_args <- c("A", "B") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_spsolve', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__stack torch__stack <- function(tensors, dim = 1L) { args <- mget(x = c("tensors", "dim")) @@ -8350,6 +9641,40 @@ fun_type = 'namespace' } +#' @rdname torch__test_functorch_fallback +torch__test_functorch_fallback <- function(self, other) { + args <- mget(x = c("self", "other")) +expected_types <- list(self = "Tensor", other = "Tensor") +nd_args <- c("self", "other") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_test_functorch_fallback', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__test_functorch_fallback_out +torch__test_functorch_fallback_out <- function(out, self, other) { + args <- mget(x = c("out", "self", "other")) +expected_types <- list(out = "Tensor", self = "Tensor", other = "Tensor") +nd_args <- c("out", "self", "other") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_test_functorch_fallback_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__test_optional_filled_intlist torch__test_optional_filled_intlist <- function(values, addends) { args <- mget(x = c("values", "addends")) @@ -8452,6 +9777,23 @@ fun_type = 'namespace' } +#' @rdname torch__test_parallel_materialize +torch__test_parallel_materialize <- function(self, num_parallel, skip_first = FALSE) { + args <- mget(x = c("self", "num_parallel", "skip_first")) +expected_types <- list(self = "Tensor", num_parallel = "int64_t", skip_first = "bool") +nd_args <- c("self", "num_parallel") +return_types <- list(list('Tensor')) +call_c_function( 
+fun_name = '_test_parallel_materialize', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__test_serialization_subcmul torch__test_serialization_subcmul <- function(self, other, alpha = 1L) { args <- mget(x = c("self", "other", "alpha")) @@ -8764,9 +10106,9 @@ fun_type = 'namespace' #' @rdname torch__to_dense_out -torch__to_dense_out <- function(out, self, dtype = NULL) { - args <- mget(x = c("out", "self", "dtype")) -expected_types <- list(out = "Tensor", self = "Tensor", dtype = "ScalarType") +torch__to_dense_out <- function(out, self, dtype = NULL, masked_grad = NULL) { + args <- mget(x = c("out", "self", "dtype", "masked_grad")) +expected_types <- list(out = "Tensor", self = "Tensor", dtype = "ScalarType", masked_grad = "bool") nd_args <- c("out", "self") return_types <- list(list('Tensor')) call_c_function( @@ -8780,14 +10122,15 @@ fun_type = 'namespace' } -#' @rdname torch__transform_bias_rescale_qkv -torch__transform_bias_rescale_qkv <- function(qkv, qkv_bias, num_heads) { - args <- mget(x = c("qkv", "qkv_bias", "num_heads")) -expected_types <- list(qkv = "Tensor", qkv_bias = "Tensor", num_heads = "int64_t") -nd_args <- c("qkv", "qkv_bias", "num_heads") -return_types <- list(list("Tensor", "Tensor", "Tensor")) +#' @rdname torch__to_sparse_bsc_out +torch__to_sparse_bsc_out <- function(out, self, blocksize, dense_dim = NULL) { + args <- mget(x = c("out", "self", "blocksize", "dense_dim")) +expected_types <- list(out = "Tensor", self = "Tensor", blocksize = "IntArrayRef", + dense_dim = "int64_t") +nd_args <- c("out", "self", "blocksize") +return_types <- list(list('Tensor')) call_c_function( -fun_name = '_transform_bias_rescale_qkv', +fun_name = '_to_sparse_bsc_out', args = args, expected_types = expected_types, nd_args = nd_args, @@ -8797,15 +10140,15 @@ fun_type = 'namespace' } -#' @rdname torch__transform_bias_rescale_qkv_out -torch__transform_bias_rescale_qkv_out <- function(out0, out1, out2, qkv, qkv_bias, num_heads) { - args <- mget(x = c("out0", "out1", "out2", "qkv", "qkv_bias", "num_heads")) -expected_types <- list(out0 = "Tensor", out1 = "Tensor", out2 = "Tensor", qkv = "Tensor", - qkv_bias = "Tensor", num_heads = "int64_t") -nd_args <- c("out0", "out1", "out2", "qkv", "qkv_bias", "num_heads") -return_types <- list(list("Tensor", "Tensor", "Tensor")) +#' @rdname torch__to_sparse_bsr_out +torch__to_sparse_bsr_out <- function(out, self, blocksize, dense_dim = NULL) { + args <- mget(x = c("out", "self", "blocksize", "dense_dim")) +expected_types <- list(out = "Tensor", self = "Tensor", blocksize = "IntArrayRef", + dense_dim = "int64_t") +nd_args <- c("out", "self", "blocksize") +return_types <- list(list('Tensor')) call_c_function( -fun_name = '_transform_bias_rescale_qkv_out', +fun_name = '_to_sparse_bsr_out', args = args, expected_types = expected_types, nd_args = nd_args, @@ -8815,23 +10158,83 @@ fun_type = 'namespace' } -#' @rdname torch__transformer_decoder_only_layer_fwd -torch__transformer_decoder_only_layer_fwd <- function(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask = list(), incr_key = list(), incr_value = list()) { - args <- mget(x = c("src", "embed_dim", "num_heads", "qkv_weight", "qkv_bias", "proj_weight", "proj_bias", "use_gelu", "norm_first", "eps", "norm_weight_1", "norm_bias_1", "norm_weight_2", 
"norm_bias_2", "ffn_weight_1", "ffn_bias_1", "ffn_weight_2", "ffn_bias_2", "mask", "incr_key", "incr_value")) -expected_types <- list(src = "Tensor", embed_dim = "int64_t", num_heads = "int64_t", - qkv_weight = "Tensor", qkv_bias = "Tensor", proj_weight = "Tensor", - proj_bias = "Tensor", use_gelu = "bool", norm_first = "bool", - eps = "double", norm_weight_1 = "Tensor", norm_bias_1 = "Tensor", - norm_weight_2 = "Tensor", norm_bias_2 = "Tensor", ffn_weight_1 = "Tensor", - ffn_bias_1 = "Tensor", ffn_weight_2 = "Tensor", ffn_bias_2 = "Tensor", - mask = "Tensor", incr_key = "Tensor", incr_value = "Tensor") -nd_args <- c("src", "embed_dim", "num_heads", "qkv_weight", "qkv_bias", -"proj_weight", "proj_bias", "use_gelu", "norm_first", "eps", -"norm_weight_1", "norm_bias_1", "norm_weight_2", "norm_bias_2", -"ffn_weight_1", "ffn_bias_1", "ffn_weight_2", "ffn_bias_2") +#' @rdname torch__to_sparse_csc_out +torch__to_sparse_csc_out <- function(out, self, dense_dim = NULL) { + args <- mget(x = c("out", "self", "dense_dim")) +expected_types <- list(out = "Tensor", self = "Tensor", dense_dim = "int64_t") +nd_args <- c("out", "self") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_to_sparse_csc_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__to_sparse_csr_out +torch__to_sparse_csr_out <- function(out, self, dense_dim = NULL) { + args <- mget(x = c("out", "self", "dense_dim")) +expected_types <- list(out = "Tensor", self = "Tensor", dense_dim = "int64_t") +nd_args <- c("out", "self") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_to_sparse_csr_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__to_sparse_out +torch__to_sparse_out <- function(out, self, layout = NULL, sparse_dim, blocksize = NULL, dense_dim = NULL) { + args <- mget(x = c("out", "self", "layout", "sparse_dim", "blocksize", "dense_dim")) +expected_types <- list(out = "Tensor", self = "Tensor", layout = "Layout", sparse_dim = "int64_t", + blocksize = "IntArrayRef", dense_dim = "int64_t") +nd_args <- c("out", "self", "sparse_dim") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_to_sparse_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__to_sparse_semi_structured +torch__to_sparse_semi_structured <- function(dense) { + args <- mget(x = c("dense")) +expected_types <- list(dense = "Tensor") +nd_args <- "dense" +return_types <- list(list("Tensor", "Tensor")) +call_c_function( +fun_name = '_to_sparse_semi_structured', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__transform_bias_rescale_qkv +torch__transform_bias_rescale_qkv <- function(qkv, qkv_bias, num_heads) { + args <- mget(x = c("qkv", "qkv_bias", "num_heads")) +expected_types <- list(qkv = "Tensor", qkv_bias = "Tensor", num_heads = "int64_t") +nd_args <- c("qkv", "qkv_bias", "num_heads") return_types <- list(list("Tensor", "Tensor", "Tensor")) call_c_function( -fun_name = '_transformer_decoder_only_layer_fwd', +fun_name = '_transform_bias_rescale_qkv', args = args, expected_types = expected_types, nd_args = nd_args, @@ -8841,24 +10244,15 @@ fun_type = 'namespace' } -#' @rdname 
torch__transformer_decoder_only_layer_fwd_out -torch__transformer_decoder_only_layer_fwd_out <- function(out0, out1, out2, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask = list(), incr_key = list(), incr_value = list()) { - args <- mget(x = c("out0", "out1", "out2", "src", "embed_dim", "num_heads", "qkv_weight", "qkv_bias", "proj_weight", "proj_bias", "use_gelu", "norm_first", "eps", "norm_weight_1", "norm_bias_1", "norm_weight_2", "norm_bias_2", "ffn_weight_1", "ffn_bias_1", "ffn_weight_2", "ffn_bias_2", "mask", "incr_key", "incr_value")) -expected_types <- list(out0 = "Tensor", out1 = "Tensor", out2 = "Tensor", src = "Tensor", - embed_dim = "int64_t", num_heads = "int64_t", qkv_weight = "Tensor", - qkv_bias = "Tensor", proj_weight = "Tensor", proj_bias = "Tensor", - use_gelu = "bool", norm_first = "bool", eps = "double", norm_weight_1 = "Tensor", - norm_bias_1 = "Tensor", norm_weight_2 = "Tensor", norm_bias_2 = "Tensor", - ffn_weight_1 = "Tensor", ffn_bias_1 = "Tensor", ffn_weight_2 = "Tensor", - ffn_bias_2 = "Tensor", mask = "Tensor", incr_key = "Tensor", - incr_value = "Tensor") -nd_args <- c("out0", "out1", "out2", "src", "embed_dim", "num_heads", "qkv_weight", -"qkv_bias", "proj_weight", "proj_bias", "use_gelu", "norm_first", -"eps", "norm_weight_1", "norm_bias_1", "norm_weight_2", "norm_bias_2", -"ffn_weight_1", "ffn_bias_1", "ffn_weight_2", "ffn_bias_2") +#' @rdname torch__transform_bias_rescale_qkv_out +torch__transform_bias_rescale_qkv_out <- function(out0, out1, out2, qkv, qkv_bias, num_heads) { + args <- mget(x = c("out0", "out1", "out2", "qkv", "qkv_bias", "num_heads")) +expected_types <- list(out0 = "Tensor", out1 = "Tensor", out2 = "Tensor", qkv = "Tensor", + qkv_bias = "Tensor", num_heads = "int64_t") +nd_args <- c("out0", "out1", "out2", "qkv", "qkv_bias", "num_heads") return_types <- list(list("Tensor", "Tensor", "Tensor")) call_c_function( -fun_name = '_transformer_decoder_only_layer_fwd_out', +fun_name = '_transform_bias_rescale_qkv_out', args = args, expected_types = expected_types, nd_args = nd_args, @@ -9124,6 +10518,77 @@ fun_type = 'namespace' } + +#' @rdname torch__unsafe_index +torch__unsafe_index <- function(self, indices) { + args <- mget(x = c("self", "indices")) +expected_types <- list(self = "Tensor", indices = "const c10::List<::std::optional<Tensor>> &") +nd_args <- c("self", "indices") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_unsafe_index', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__unsafe_index_put +torch__unsafe_index_put <- function(self, indices, values, accumulate = FALSE) { + args <- mget(x = c("self", "indices", "values", "accumulate")) +expected_types <- list(self = "Tensor", indices = "const c10::List<::std::optional<Tensor>> &", + values = "Tensor", accumulate = "bool") +nd_args <- c("self", "indices", "values") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_unsafe_index_put', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__unsafe_masked_index +torch__unsafe_masked_index <- function(self, mask, indices, fill) { + args <- mget(x = c("self", "mask", "indices", "fill")) +expected_types <- list(self = "Tensor", mask = "Tensor", indices = "const c10::List<::std::optional<Tensor>> &",
+ fill = "Scalar") +nd_args <- c("self", "mask", "indices", "fill") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_unsafe_masked_index', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__unsafe_masked_index_put_accumulate +torch__unsafe_masked_index_put_accumulate <- function(self, mask, indices, values) { + args <- mget(x = c("self", "mask", "indices", "values")) +expected_types <- list(self = "Tensor", mask = "Tensor", indices = "const c10::List<::std::optional<Tensor>> &", + values = "Tensor") +nd_args <- c("self", "mask", "indices", "values") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_unsafe_masked_index_put_accumulate', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__unsafe_view torch__unsafe_view <- function(self, size) { args <- mget(x = c("self", "size")) @@ -9625,9 +11090,10 @@ fun_type = 'namespace' #' @rdname torch__validate_sparse_coo_tensor_args -torch__validate_sparse_coo_tensor_args <- function(indices, values, size) { - args <- mget(x = c("indices", "values", "size")) -expected_types <- list(indices = "Tensor", values = "Tensor", size = "IntArrayRef") +torch__validate_sparse_coo_tensor_args <- function(indices, values, size, is_coalesced = NULL) { + args <- mget(x = c("indices", "values", "size", "is_coalesced")) +expected_types <- list(indices = "Tensor", values = "Tensor", size = "IntArrayRef", + is_coalesced = "bool") nd_args <- c("indices", "values", "size") return_types <- list(list("void")) call_c_function( @@ -9711,6 +11177,41 @@ fun_type = 'namespace' } + +#' @rdname torch__weight_int4pack_mm +torch__weight_int4pack_mm <- function(self, mat2, qGroupSize, qScaleAndZeros) { + args <- mget(x = c("self", "mat2", "qGroupSize", "qScaleAndZeros")) +expected_types <- list(self = "Tensor", mat2 = "Tensor", qGroupSize = "int64_t", + qScaleAndZeros = "Tensor") +nd_args <- c("self", "mat2", "qGroupSize", "qScaleAndZeros") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_weight_int4pack_mm', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__weight_int8pack_mm +torch__weight_int8pack_mm <- function(self, mat2, scales) { + args <- mget(x = c("self", "mat2", "scales")) +expected_types <- list(self = "Tensor", mat2 = "Tensor", scales = "Tensor") +nd_args <- c("self", "mat2", "scales") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_weight_int8pack_mm', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch__weight_norm torch__weight_norm <- function(v, g, dim = 1L) { args <- mget(x = c("v", "g", "dim")) @@ -9818,6 +11319,44 @@ fun_type = 'namespace' } + +#' @rdname torch__wrapped_linear_prepack +torch__wrapped_linear_prepack <- function(weight, weight_scale, weight_zero_point, bias) { + args <- mget(x = c("weight", "weight_scale", "weight_zero_point", "bias")) +expected_types <- list(weight = "Tensor", weight_scale = "Tensor", weight_zero_point = "Tensor", + bias = "Tensor") +nd_args <- c("weight", "weight_scale", "weight_zero_point", "bias") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_wrapped_linear_prepack', +args = args, +expected_types = expected_types,
+nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch__wrapped_quantized_linear_prepacked +torch__wrapped_quantized_linear_prepacked <- function(input, input_scale, input_zero_point, packed_weight, output_scale, output_zero_point, out_channel) { + args <- mget(x = c("input", "input_scale", "input_zero_point", "packed_weight", "output_scale", "output_zero_point", "out_channel")) +expected_types <- list(input = "Tensor", input_scale = "Tensor", input_zero_point = "Tensor", + packed_weight = "Tensor", output_scale = "Tensor", output_zero_point = "Tensor", + out_channel = "int64_t") +nd_args <- c("input", "input_scale", "input_zero_point", "packed_weight", +"output_scale", "output_zero_point", "out_channel") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = '_wrapped_quantized_linear_prepacked', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch_abs torch_abs <- function(self) { args <- mget(x = c("self")) @@ -10669,9 +12208,10 @@ fun_type = 'namespace' #' @rdname torch_all -torch_all <- function(self, dim, keepdim = FALSE) { +torch_all <- function(self, dim = NULL, keepdim = FALSE) { args <- mget(x = c("self", "dim", "keepdim")) -expected_types <- list(self = "Tensor", dim = c("int64_t", "Dimname"), keepdim = "bool") +expected_types <- list(self = "Tensor", dim = c("int64_t", "IntArrayRef", "Dimname" +), keepdim = "bool") nd_args <- c("self", "dim") return_types <- list(list('Tensor')) call_c_function( @@ -10686,10 +12226,10 @@ fun_type = 'namespace' #' @rdname torch_all_out -torch_all_out <- function(out, self, dim, keepdim = FALSE) { +torch_all_out <- function(out, self, dim = NULL, keepdim = FALSE) { args <- mget(x = c("out", "self", "dim", "keepdim")) -expected_types <- list(out = "Tensor", self = "Tensor", dim = c("int64_t", "Dimname" -), keepdim = "bool") +expected_types <- list(out = "Tensor", self = "Tensor", dim = c("int64_t", "IntArrayRef", +"Dimname"), keepdim = "bool") nd_args <- c("out", "self", "dim") return_types <- list(list('Tensor')) call_c_function( @@ -10893,9 +12433,10 @@ fun_type = 'namespace' #' @rdname torch_any -torch_any <- function(self, dim, keepdim = FALSE) { +torch_any <- function(self, dim = NULL, keepdim = FALSE) { args <- mget(x = c("self", "dim", "keepdim")) -expected_types <- list(self = "Tensor", dim = c("int64_t", "Dimname"), keepdim = "bool") +expected_types <- list(self = "Tensor", dim = c("int64_t", "IntArrayRef", "Dimname" +), keepdim = "bool") nd_args <- c("self", "dim") return_types <- list(list('Tensor')) call_c_function( @@ -10910,10 +12451,10 @@ fun_type = 'namespace' #' @rdname torch_any_out -torch_any_out <- function(out, self, dim, keepdim = FALSE) { +torch_any_out <- function(out, self, dim = NULL, keepdim = FALSE) { args <- mget(x = c("out", "self", "dim", "keepdim")) -expected_types <- list(out = "Tensor", self = "Tensor", dim = c("int64_t", "Dimname" -), keepdim = "bool") +expected_types <- list(out = "Tensor", self = "Tensor", dim = c("int64_t", "IntArrayRef", +"Dimname"), keepdim = "bool") nd_args <- c("out", "self", "dim") return_types <- list(list('Tensor')) call_c_function( @@ -12085,14 +13626,36 @@ fun_type = 'namespace' } +#' @rdname torch_batch_norm_backward +torch_batch_norm_backward <- function(grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve) { + args <- mget(x = c("grad_out", "input", "weight", "running_mean", 
"running_var", "save_mean", "save_var", "update", "eps", "output_mask", "reserve")) +expected_types <- list(grad_out = "Tensor", input = "Tensor", weight = "Tensor", + running_mean = "Tensor", running_var = "Tensor", save_mean = "Tensor", + save_var = "Tensor", update = "bool", eps = "double", output_mask = "::std::array", + reserve = "Tensor") +nd_args <- c("grad_out", "input", "weight", "running_mean", "running_var", +"save_mean", "save_var", "update", "eps", "output_mask", "reserve" +) +return_types <- list(list("Tensor", "Tensor", "Tensor")) +call_c_function( +fun_name = 'batch_norm_backward', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch_batch_norm_backward_elemt -torch_batch_norm_backward_elemt <- function(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count) { - args <- mget(x = c("grad_out", "input", "mean", "invstd", "weight", "mean_dy", "mean_dy_xmu", "count")) +torch_batch_norm_backward_elemt <- function(grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count) { + args <- mget(x = c("grad_out", "input", "mean", "invstd", "weight", "sum_dy", "sum_dy_xmu", "count")) expected_types <- list(grad_out = "Tensor", input = "Tensor", mean = "Tensor", - invstd = "Tensor", weight = "Tensor", mean_dy = "Tensor", - mean_dy_xmu = "Tensor", count = "Tensor") -nd_args <- c("grad_out", "input", "mean", "invstd", "weight", "mean_dy", -"mean_dy_xmu", "count") + invstd = "Tensor", weight = "Tensor", sum_dy = "Tensor", + sum_dy_xmu = "Tensor", count = "Tensor") +nd_args <- c("grad_out", "input", "mean", "invstd", "weight", "sum_dy", +"sum_dy_xmu", "count") return_types <- list(list('Tensor')) call_c_function( fun_name = 'batch_norm_backward_elemt', @@ -12106,13 +13669,13 @@ fun_type = 'namespace' #' @rdname torch_batch_norm_backward_elemt_out -torch_batch_norm_backward_elemt_out <- function(out, grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count) { - args <- mget(x = c("out", "grad_out", "input", "mean", "invstd", "weight", "mean_dy", "mean_dy_xmu", "count")) +torch_batch_norm_backward_elemt_out <- function(out, grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count) { + args <- mget(x = c("out", "grad_out", "input", "mean", "invstd", "weight", "sum_dy", "sum_dy_xmu", "count")) expected_types <- list(out = "Tensor", grad_out = "Tensor", input = "Tensor", mean = "Tensor", - invstd = "Tensor", weight = "Tensor", mean_dy = "Tensor", - mean_dy_xmu = "Tensor", count = "Tensor") -nd_args <- c("out", "grad_out", "input", "mean", "invstd", "weight", "mean_dy", -"mean_dy_xmu", "count") + invstd = "Tensor", weight = "Tensor", sum_dy = "Tensor", + sum_dy_xmu = "Tensor", count = "Tensor") +nd_args <- c("out", "grad_out", "input", "mean", "invstd", "weight", "sum_dy", +"sum_dy_xmu", "count") return_types <- list(list('Tensor')) call_c_function( fun_name = 'batch_norm_backward_elemt_out', @@ -12967,10 +14530,10 @@ fun_type = 'namespace' #' @rdname torch_can_cast -torch_can_cast <- function(from, to) { - args <- mget(x = c("from", "to")) -expected_types <- list(from = "ScalarType", to = "ScalarType") -nd_args <- c("from", "to") +torch_can_cast <- function(from_, to) { + args <- mget(x = c("from_", "to")) +expected_types <- list(from_ = "ScalarType", to = "ScalarType") +nd_args <- c("from_", "to") return_types <- list(list('bool')) call_c_function( fun_name = 'can_cast', @@ -16446,6 +18009,40 @@ fun_type = 'namespace' } +#' @rdname torch_empty_permuted 
+torch_empty_permuted <- function(size, physical_layout, options = list()) { + args <- mget(x = c("size", "physical_layout", "options")) +expected_types <- list(size = "IntArrayRef", physical_layout = "IntArrayRef", options = "TensorOptions") +nd_args <- c("size", "physical_layout") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = 'empty_permuted', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch_empty_permuted_out +torch_empty_permuted_out <- function(out, size, physical_layout) { + args <- mget(x = c("out", "size", "physical_layout")) +expected_types <- list(out = "Tensor", size = "IntArrayRef", physical_layout = "IntArrayRef") +nd_args <- c("out", "size", "physical_layout") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = 'empty_permuted_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch_empty_quantized torch_empty_quantized <- function(size, qtensor, options = list(), memory_format = NULL) { args <- mget(x = c("size", "qtensor", "options", "memory_format")) @@ -18339,7 +19936,8 @@ fun_type = 'namespace' #' @rdname torch_floor_divide_out torch_floor_divide_out <- function(out, self, other) { args <- mget(x = c("out", "self", "other")) -expected_types <- list(out = "Tensor", self = "Tensor", other = "Tensor") +expected_types <- list(out = "Tensor", self = "Tensor", other = c("Tensor", "Scalar" +)) nd_args <- c("out", "self", "other") return_types <- list(list('Tensor')) call_c_function( @@ -20606,7 +22204,7 @@ fun_type = 'namespace' #' @rdname torch_index torch_index <- function(self, indices) { args <- mget(x = c("self", "indices")) -expected_types <- list(self = "Tensor", indices = "const c10::List<c10::optional<Tensor>> &") +expected_types <- list(self = "Tensor", indices = "const c10::List<::std::optional<Tensor>> &") nd_args <- c("self", "indices") return_types <- list(list('Tensor')) call_c_function( @@ -20731,7 +22329,7 @@ fun_type = 'namespace' #' @rdname torch_index_out torch_index_out <- function(out, self, indices) { args <- mget(x = c("out", "self", "indices")) -expected_types <- list(out = "Tensor", self = "Tensor", indices = "const c10::List<c10::optional<Tensor>> &") +expected_types <- list(out = "Tensor", self = "Tensor", indices = "const c10::List<::std::optional<Tensor>> &") nd_args <- c("out", "self", "indices") return_types <- list(list('Tensor')) call_c_function( @@ -20748,7 +22346,7 @@ fun_type = 'namespace' #' @rdname torch_index_put torch_index_put <- function(self, indices, values, accumulate = FALSE) { args <- mget(x = c("self", "indices", "values", "accumulate")) -expected_types <- list(self = "Tensor", indices = "const c10::List<c10::optional<Tensor>> &", +expected_types <- list(self = "Tensor", indices = "const c10::List<::std::optional<Tensor>> &", values = "Tensor", accumulate = "bool") nd_args <- c("self", "indices", "values") return_types <- list(list('Tensor')) @@ -20766,7 +22364,7 @@ fun_type = 'namespace' #' @rdname torch_index_put_ torch_index_put_ <- function(self, indices, values, accumulate = FALSE) { args <- mget(x = c("self", "indices", "values", "accumulate")) -expected_types <- list(self = "Tensor", indices = "const c10::List<c10::optional<Tensor>> &", +expected_types <- list(self = "Tensor", indices = "const c10::List<::std::optional<Tensor>> &", values = "Tensor", accumulate = "bool") nd_args <- c("self", "indices", "values") return_types <- list(list('Tensor')) @@ -20784,7 +22382,7 @@ fun_type = 'namespace' #' @rdname
torch_index_put_out torch_index_put_out <- function(out, self, indices, values, accumulate = FALSE) { args <- mget(x = c("out", "self", "indices", "values", "accumulate")) -expected_types <- list(out = "Tensor", self = "Tensor", indices = "const c10::List<c10::optional<Tensor>> &", +expected_types <- list(out = "Tensor", self = "Tensor", indices = "const c10::List<::std::optional<Tensor>> &", values = "Tensor", accumulate = "bool") nd_args <- c("out", "self", "indices", "values") return_types <- list(list('Tensor')) @@ -23532,7 +25130,8 @@ fun_type = 'namespace' #' @rdname .torch_linspace .torch_linspace <- function(start, end, steps, options = list()) { args <- mget(x = c("start", "end", "steps", "options")) -expected_types <- list(start = "Scalar", end = "Scalar", steps = "int64_t", options = "TensorOptions") +expected_types <- list(start = c("Scalar", "Tensor"), end = c("Scalar", "Tensor" +), steps = "int64_t", options = "TensorOptions") nd_args <- c("start", "end", "steps") return_types <- list(list('Tensor')) call_c_function( @@ -23549,7 +25148,8 @@ fun_type = 'namespace' #' @rdname torch_linspace_out torch_linspace_out <- function(out, start, end, steps) { args <- mget(x = c("out", "start", "end", "steps")) -expected_types <- list(out = "Tensor", start = "Scalar", end = "Scalar", steps = "int64_t") +expected_types <- list(out = "Tensor", start = c("Scalar", "Tensor"), end = c("Scalar", +"Tensor"), steps = "int64_t") nd_args <- c("out", "start", "end", "steps") return_types <- list(list('Tensor')) call_c_function( @@ -24284,8 +25884,8 @@ fun_type = 'namespace' #' @rdname .torch_logspace .torch_logspace <- function(start, end, steps, base = 10, options = list()) { args <- mget(x = c("start", "end", "steps", "base", "options")) -expected_types <- list(start = "Scalar", end = "Scalar", steps = "int64_t", base = "double", - options = "TensorOptions") +expected_types <- list(start = c("Scalar", "Tensor"), end = c("Scalar", "Tensor" +), steps = "int64_t", base = "double", options = "TensorOptions") nd_args <- c("start", "end", "steps") return_types <- list(list('Tensor')) call_c_function( @@ -24302,8 +25902,8 @@ fun_type = 'namespace' #' @rdname torch_logspace_out torch_logspace_out <- function(out, start, end, steps, base = 10) { args <- mget(x = c("out", "start", "end", "steps", "base")) -expected_types <- list(out = "Tensor", start = "Scalar", end = "Scalar", steps = "int64_t", - base = "double") +expected_types <- list(out = "Tensor", start = c("Scalar", "Tensor"), end = c("Scalar", +"Tensor"), steps = "int64_t", base = "double") nd_args <- c("out", "start", "end", "steps") return_types <- list(list('Tensor')) call_c_function( @@ -24617,6 +26217,23 @@ fun_type = 'namespace' } + +#' @rdname torch_masked_scatter_backward +torch_masked_scatter_backward <- function(grad_output, mask, sizes) { + args <- mget(x = c("grad_output", "mask", "sizes")) +expected_types <- list(grad_output = "Tensor", mask = "Tensor", sizes = "IntArrayRef") +nd_args <- c("grad_output", "mask", "sizes") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = 'masked_scatter_backward', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch_masked_scatter_out torch_masked_scatter_out <- function(out, self, mask, source) { args <- mget(x = c("out", "self", "mask", "source")) @@ -26237,10 +27854,10 @@ fun_type = 'namespace' #' @rdname torch_mkldnn_reorder_conv3d_weight -torch_mkldnn_reorder_conv3d_weight <- function(self, padding = 0L, stride = 1L,
dilation = 1L, groups = 1L) { - args <- mget(x = c("self", "padding", "stride", "dilation", "groups")) +torch_mkldnn_reorder_conv3d_weight <- function(self, padding = 0L, stride = 1L, dilation = 1L, groups = 1L, input_size = NULL) { + args <- mget(x = c("self", "padding", "stride", "dilation", "groups", "input_size")) expected_types <- list(self = "Tensor", padding = "IntArrayRef", stride = "IntArrayRef", - dilation = "IntArrayRef", groups = "int64_t") + dilation = "IntArrayRef", groups = "int64_t", input_size = "IntArrayRef") nd_args <- "self" return_types <- list(list('Tensor')) call_c_function( @@ -26255,10 +27872,11 @@ fun_type = 'namespace' #' @rdname torch_mkldnn_reorder_conv3d_weight_out -torch_mkldnn_reorder_conv3d_weight_out <- function(out, self, padding = 0L, stride = 1L, dilation = 1L, groups = 1L) { - args <- mget(x = c("out", "self", "padding", "stride", "dilation", "groups")) +torch_mkldnn_reorder_conv3d_weight_out <- function(out, self, padding = 0L, stride = 1L, dilation = 1L, groups = 1L, input_size = NULL) { + args <- mget(x = c("out", "self", "padding", "stride", "dilation", "groups", "input_size")) expected_types <- list(out = "Tensor", self = "Tensor", padding = "IntArrayRef", - stride = "IntArrayRef", dilation = "IntArrayRef", groups = "int64_t") + stride = "IntArrayRef", dilation = "IntArrayRef", groups = "int64_t", + input_size = "IntArrayRef") nd_args <- c("out", "self") return_types <- list(list('Tensor')) call_c_function( @@ -28205,6 +29823,40 @@ fun_type = 'namespace' } +#' @rdname torch_nonzero_static +torch_nonzero_static <- function(self, size, fill_value = -1L) { + args <- mget(x = c("self", "size", "fill_value")) +expected_types <- list(self = "Tensor", size = "int64_t", fill_value = "int64_t") +nd_args <- c("self", "size") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = 'nonzero_static', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch_nonzero_static_out +torch_nonzero_static_out <- function(out, self, size, fill_value = -1L) { + args <- mget(x = c("out", "self", "size", "fill_value")) +expected_types <- list(out = "Tensor", self = "Tensor", size = "int64_t", fill_value = "int64_t") +nd_args <- c("out", "self", "size") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = 'nonzero_static_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname .torch_norm .torch_norm <- function(self, p = 2L, dim, keepdim = FALSE, dtype) { args <- mget(x = c("self", "p", "dim", "keepdim", "dtype")) @@ -28588,9 +30240,10 @@ fun_type = 'namespace' #' @rdname torch_pad_sequence -torch_pad_sequence <- function(sequences, batch_first = FALSE, padding_value = 0L) { - args <- mget(x = c("sequences", "batch_first", "padding_value")) -expected_types <- list(sequences = "TensorList", batch_first = "bool", padding_value = "double") +torch_pad_sequence <- function(sequences, batch_first = FALSE, padding_value = 0L, padding_side = "right") { + args <- mget(x = c("sequences", "batch_first", "padding_value", "padding_side")) +expected_types <- list(sequences = "TensorList", batch_first = "bool", padding_value = "double", + padding_side = "c10::string_view") nd_args <- "sequences" return_types <- list(list('Tensor')) call_c_function( @@ -29493,6 +31146,43 @@ fun_type = 'namespace' } +#' @rdname torch_quantized_max_pool3d +torch_quantized_max_pool3d <- 
function(self, kernel_size, stride = list(), padding = 0L, dilation = 1L, ceil_mode = FALSE) { + args <- mget(x = c("self", "kernel_size", "stride", "padding", "dilation", "ceil_mode")) +expected_types <- list(self = "Tensor", kernel_size = "IntArrayRef", stride = "IntArrayRef", + padding = "IntArrayRef", dilation = "IntArrayRef", ceil_mode = "bool") +nd_args <- c("self", "kernel_size") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = 'quantized_max_pool3d', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch_quantized_max_pool3d_out +torch_quantized_max_pool3d_out <- function(out, self, kernel_size, stride = list(), padding = 0L, dilation = 1L, ceil_mode = FALSE) { + args <- mget(x = c("out", "self", "kernel_size", "stride", "padding", "dilation", "ceil_mode")) +expected_types <- list(out = "Tensor", self = "Tensor", kernel_size = "IntArrayRef", + stride = "IntArrayRef", padding = "IntArrayRef", dilation = "IntArrayRef", + ceil_mode = "bool") +nd_args <- c("out", "self", "kernel_size") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = 'quantized_max_pool3d_out', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch_quantized_rnn_relu_cell torch_quantized_rnn_relu_cell <- function(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh) { args <- mget(x = c("input", "hx", "w_ih", "w_hh", "b_ih", "b_hh", "packed_ih", "packed_hh", "col_offsets_ih", "col_offsets_hh", "scale_ih", "scale_hh", "zero_point_ih", "zero_point_hh")) @@ -30816,6 +32506,24 @@ fun_type = 'namespace' } +#' @rdname torch_rms_norm +torch_rms_norm <- function(input, normalized_shape, weight = list(), eps = NULL) { + args <- mget(x = c("input", "normalized_shape", "weight", "eps")) +expected_types <- list(input = "Tensor", normalized_shape = "IntArrayRef", weight = "Tensor", + eps = "double") +nd_args <- c("input", "normalized_shape") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = 'rms_norm', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch_rnn_relu torch_rnn_relu <- function(data, input, batch_sizes, hx, params, has_biases, num_layers, dropout, train, batch_first, bidirectional) { args <- mget(x = c("data", "input", "batch_sizes", "hx", "params", "has_biases", "num_layers", "dropout", "train", "batch_first", "bidirectional")) @@ -31334,10 +33042,11 @@ fun_type = 'namespace' #' @rdname torch_scaled_dot_product_attention -torch_scaled_dot_product_attention <- function(query, key, value, attn_mask = list(), dropout_p = 0L, is_causal = FALSE) { - args <- mget(x = c("query", "key", "value", "attn_mask", "dropout_p", "is_causal")) +torch_scaled_dot_product_attention <- function(query, key, value, attn_mask = list(), dropout_p = 0L, is_causal = FALSE, scale = NULL, enable_gqa = FALSE) { + args <- mget(x = c("query", "key", "value", "attn_mask", "dropout_p", "is_causal", "scale", "enable_gqa")) expected_types <- list(query = "Tensor", key = "Tensor", value = "Tensor", attn_mask = "Tensor", - dropout_p = "double", is_causal = "bool") + dropout_p = "double", is_causal = "bool", scale = "double", + enable_gqa = "bool") nd_args <- c("query", "key", "value") return_types <- list(list('Tensor')) call_c_function( 
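The hunk above extends torch_scaled_dot_product_attention() with optional scale and enable_gqa arguments. A minimal usage sketch of the extended binding, under stated assumptions (a build of this branch; the tensor shapes, the explicit scale value, and the 8-query-head/2-KV-head split are illustrative, not taken from the diff):

library(torch)
# Shapes follow the (batch, heads, seq_len, head_dim) convention.
q <- torch_randn(2, 8, 32, 64)   # 8 query heads
k <- torch_randn(2, 2, 32, 64)   # grouped K/V: only 2 shared KV heads
v <- torch_randn(2, 2, 32, 64)
out <- torch_scaled_dot_product_attention(
  q, k, v,
  is_causal  = TRUE,
  scale      = 1 / sqrt(64),     # reproduces the default 1/sqrt(head_dim)
  enable_gqa = TRUE              # maps the 8 query heads onto the 2 KV heads
)
out$shape                        # 2 8 32 64

Leaving scale = NULL and enable_gqa = FALSE (the new defaults) keeps the previous behaviour, so existing callers are unaffected.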
@@ -32261,6 +33970,24 @@ fun_type = 'namespace' } +#' @rdname torch_slice_inverse +torch_slice_inverse <- function(self, src, dim = 1L, start = NULL, end = NULL, step = 1L) { + args <- mget(x = c("self", "src", "dim", "start", "end", "step")) +expected_types <- list(self = "Tensor", src = "Tensor", dim = "int64_t", start = "int64_t", + end = "int64_t", step = "int64_t") +nd_args <- c("self", "src") +return_types <- list(list('Tensor')) +call_c_function( +fun_name = 'slice_inverse', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch_slice_scatter torch_slice_scatter <- function(self, src, dim = 1L, start = NULL, end = NULL, step = 1L) { args <- mget(x = c("self", "src", "dim", "start", "end", "step")) @@ -32982,10 +34709,10 @@ fun_type = 'namespace' #' @rdname .torch_sparse_coo_tensor -.torch_sparse_coo_tensor <- function(indices, values, size, options = list()) { - args <- mget(x = c("indices", "values", "size", "options")) +.torch_sparse_coo_tensor <- function(indices, values, size, options = list(), is_coalesced = NULL) { + args <- mget(x = c("indices", "values", "size", "options", "is_coalesced")) expected_types <- list(indices = "Tensor", values = "Tensor", size = "IntArrayRef", - options = "TensorOptions") + options = "TensorOptions", is_coalesced = "bool") nd_args <- c("indices", "values", "size", "options") return_types <- list(list('Tensor')) call_c_function( @@ -35643,6 +37370,40 @@ fun_type = 'namespace' } +#' @rdname torch_sym_constrain_range +torch_sym_constrain_range <- function(size, min = NULL, max = NULL) { + args <- mget(x = c("size", "min", "max")) +expected_types <- list(size = "Scalar", min = "int64_t", max = "int64_t") +nd_args <- "size" +return_types <- list(list("void")) +call_c_function( +fun_name = 'sym_constrain_range', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + +#' @rdname torch_sym_constrain_range_for_size +torch_sym_constrain_range_for_size <- function(size, min = NULL, max = NULL) { + args <- mget(x = c("size", "min", "max")) +expected_types <- list(size = "Scalar", min = "int64_t", max = "int64_t") +nd_args <- "size" +return_types <- list(list("void")) +call_c_function( +fun_name = 'sym_constrain_range_for_size', +args = args, +expected_types = expected_types, +nd_args = nd_args, +return_types = return_types, +fun_type = 'namespace' +) +} + + #' @rdname torch_t torch_t <- function(self) { args <- mget(x = c("self")) @@ -36093,9 +37854,9 @@ fun_type = 'namespace' #' @rdname torch_to_dense_backward -torch_to_dense_backward <- function(grad, input) { - args <- mget(x = c("grad", "input")) -expected_types <- list(grad = "Tensor", input = "Tensor") +torch_to_dense_backward <- function(grad, input, masked_grad = NULL) { + args <- mget(x = c("grad", "input", "masked_grad")) +expected_types <- list(grad = "Tensor", input = "Tensor", masked_grad = "bool") nd_args <- c("grad", "input") return_types <- list(list('Tensor')) call_c_function( @@ -36160,94 +37921,6 @@ fun_type = 'namespace' } -#' @rdname torch_to_sparse_bsc_out -torch_to_sparse_bsc_out <- function(out, self, blocksize, dense_dim = NULL) { - args <- mget(x = c("out", "self", "blocksize", "dense_dim")) -expected_types <- list(out = "Tensor", self = "Tensor", blocksize = "IntArrayRef", - dense_dim = "int64_t") -nd_args <- c("out", "self", "blocksize") -return_types <- list(list('Tensor')) -call_c_function( -fun_name = 
'to_sparse_bsc_out', -args = args, -expected_types = expected_types, -nd_args = nd_args, -return_types = return_types, -fun_type = 'namespace' -) -} - - -#' @rdname torch_to_sparse_bsr_out -torch_to_sparse_bsr_out <- function(out, self, blocksize, dense_dim = NULL) { - args <- mget(x = c("out", "self", "blocksize", "dense_dim")) -expected_types <- list(out = "Tensor", self = "Tensor", blocksize = "IntArrayRef", - dense_dim = "int64_t") -nd_args <- c("out", "self", "blocksize") -return_types <- list(list('Tensor')) -call_c_function( -fun_name = 'to_sparse_bsr_out', -args = args, -expected_types = expected_types, -nd_args = nd_args, -return_types = return_types, -fun_type = 'namespace' -) -} - - -#' @rdname torch_to_sparse_csc_out -torch_to_sparse_csc_out <- function(out, self, dense_dim = NULL) { - args <- mget(x = c("out", "self", "dense_dim")) -expected_types <- list(out = "Tensor", self = "Tensor", dense_dim = "int64_t") -nd_args <- c("out", "self") -return_types <- list(list('Tensor')) -call_c_function( -fun_name = 'to_sparse_csc_out', -args = args, -expected_types = expected_types, -nd_args = nd_args, -return_types = return_types, -fun_type = 'namespace' -) -} - - -#' @rdname torch_to_sparse_csr_out -torch_to_sparse_csr_out <- function(out, self, dense_dim = NULL) { - args <- mget(x = c("out", "self", "dense_dim")) -expected_types <- list(out = "Tensor", self = "Tensor", dense_dim = "int64_t") -nd_args <- c("out", "self") -return_types <- list(list('Tensor')) -call_c_function( -fun_name = 'to_sparse_csr_out', -args = args, -expected_types = expected_types, -nd_args = nd_args, -return_types = return_types, -fun_type = 'namespace' -) -} - - -#' @rdname torch_to_sparse_out -torch_to_sparse_out <- function(out, self, layout = NULL, sparse_dim, blocksize = NULL, dense_dim = NULL) { - args <- mget(x = c("out", "self", "layout", "sparse_dim", "blocksize", "dense_dim")) -expected_types <- list(out = "Tensor", self = "Tensor", layout = "Layout", sparse_dim = "int64_t", - blocksize = "IntArrayRef", dense_dim = "int64_t") -nd_args <- c("out", "self", "sparse_dim") -return_types <- list(list('Tensor')) -call_c_function( -fun_name = 'to_sparse_out', -args = args, -expected_types = expected_types, -nd_args = nd_args, -return_types = return_types, -fun_type = 'namespace' -) -} - - #' @rdname .torch_topk .torch_topk <- function(self, k, dim = -1L, largest = TRUE, sorted = TRUE) { args <- mget(x = c("self", "k", "dim", "largest", "sorted")) diff --git a/R/install.R b/R/install.R index dd65f5cb99..f8f16ac3a7 100644 --- a/R/install.R +++ b/R/install.R @@ -1,5 +1,5 @@ branch <- "main" -torch_version <- "2.0.1" +torch_version <- "2.5.1" #' Install Torch #' @@ -176,11 +176,7 @@ libtorch_url <- function() { if (is_macos()) { arch <- architecture() - if (arch == "x86_64") { - url <- glue::glue("https://download.pytorch.org/libtorch/cpu/libtorch-macos-{torch_version}.zip") - } else if (arch == "arm64") { - url <- glue::glue("https://github.com/mlverse/libtorch-mac-m1/releases/download/LibTorch-for-R/libtorch-v{torch_version}.zip") - } + url <- glue::glue("https://github.com/mlverse/libtorch-mac-m1/releases/download/LibTorch-for-R/libtorch-{arch}-v{torch_version}.zip") } kind <- installation_kind() if (is_windows()) { @@ -457,12 +453,12 @@ cuda_version_windows <- function() { } check_supported_cuda_version_windows <- function(version) { - supported_versions <- c("11.7", "11.8") + supported_versions <- c("11.8", "12.4") check_supported_version(version, supported_versions) } check_supported_cuda_version_linux <- 
function(version) { - supported_versions <- c("11.7", "11.8") + supported_versions <- c("11.8", "12.4") check_supported_version(version, supported_versions) } diff --git a/R/nnf-activation.R b/R/nnf-activation.R index e5aa7603e9..0a65f897bd 100644 --- a/R/nnf-activation.R +++ b/R/nnf-activation.R @@ -728,10 +728,14 @@ nnf_multi_head_attention_forward <- function(query, # type: Tensor if (!is.null(key_padding_mask)) { attn_output_weights <- attn_output_weights$view(c(bsz, num_heads, tgt_len, src_len)) - attn_output_weights <- attn_output_weights$masked_fill( - key_padding_mask$unsqueeze(2)$unsqueeze(3), - -Inf - ) + if (key_padding_mask$dtype == torch_bool()) { + attn_output_weights <- attn_output_weights$masked_fill( + key_padding_mask$unsqueeze(2)$unsqueeze(3), + -Inf + ) + } else { + attn_output_weights <- attn_output_weights + key_padding_mask$unsqueeze(2)$unsqueeze(3) + } attn_output_weights <- attn_output_weights$view(c( bsz * num_heads, tgt_len, diff --git a/R/positron.R b/R/positron.R index 66f6d5631f..d599ab3f60 100644 --- a/R/positron.R +++ b/R/positron.R @@ -29,7 +29,7 @@ register_positron_methods_impl <- function() { register_ark_method("ark_positron_variable_display_value", "nn_module", function(x, ...) { paste0( "nn_module (", - scales::comma(torch:::get_parameter_count(attr(x, "module"))), + scales::comma(get_parameter_count(attr(x, "module"))), " parameters)" ) }) diff --git a/R/tensor.R b/R/tensor.R index 99de733325..d5213dc92f 100644 --- a/R/tensor.R +++ b/R/tensor.R @@ -89,6 +89,10 @@ Tensor <- R7Class( dtype <- self$dtype } + if (is.null(memory_format)) { + memory_format <- torch_preserve_format() + } + if (has_device) { private$`_to`( dtype = dtype, diff --git a/configure.win b/configure.win index eea4fae9bd..5dee7817d8 100755 --- a/configure.win +++ b/configure.win @@ -1,2 +1,2 @@ #!/usr/bin/env sh -CMAKE_FLAGS="-G\"Visual Studio 16 2019\" $CMAKE_FLAGS" sh ./configure +CMAKE_FLAGS="-G\"Visual Studio 17 2022\" $CMAKE_FLAGS" sh ./configure diff --git a/inst/include/lantern/lantern.h b/inst/include/lantern/lantern.h index 863bfcafb1..62bc14bd21 100644 --- a/inst/include/lantern/lantern.h +++ b/inst/include/lantern/lantern.h @@ -6,6 +6,8 @@ #else #define WIN32_LEAN_AND_MEAN 1 #include <windows.h> +#undef max +#undef min #endif #ifndef HOST_API @@ -48,13 +50,13 @@ void check_lantern_loaded(); extern int lanternLogEnabled; #define LLOG(...)
if ((lanternLogEnabled & 1) == 1) { \ - printf("%ld INFO ", time(NULL)); \ + printf("%lld INFO ", (long long)time(NULL)); \ printf(__VA_ARGS__); \ printf("\n"); \ } \ if ((lanternLogEnabled & 2) == 2) { \ FILE *pFile = fopen("lantern.log", "a"); \ - fprintf(pFile, "%ld INFO ", time(NULL)); \ + fprintf(pFile, "%lld INFO ", (long long)time(NULL)); \ fprintf(pFile, __VA_ARGS__); \ fprintf(pFile, "\n"); \ fclose(pFile); \ @@ -2343,7 +2345,7 @@ LANTERN_API void* (LANTERN_PTR _lantern_torch_show_config) (); HOST_API void* lantern_torch_show_config () { LANTERN_CHECK_LOADED - auto ret = _lantern_torch_show_config(); + void* ret = _lantern_torch_show_config(); LANTERN_HOST_HANDLER; return ret; } @@ -2352,7 +2354,7 @@ LANTERN_API void* (LANTERN_PTR _lantern_torch_parallel_info) (); HOST_API void* lantern_torch_parallel_info () { LANTERN_CHECK_LOADED - auto ret = _lantern_torch_parallel_info(); + void* ret = _lantern_torch_parallel_info(); LANTERN_HOST_HANDLER; return ret; } @@ -2658,8 +2660,28 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_align_tensors_tensorlist(void* tensors) { LANTERN_CHECK_LOADED void* ret = _lantern_align_tensors_tensorlist(tensors); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__assert_async_tensor)(void* self); HOST_API void* lantern__assert_async_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__assert_async_tensor(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__assert_async_tensor_cstringview)(void* self, void* assert_msg); + HOST_API void* lantern__assert_async_tensor_cstringview(void* self, void* assert_msg) { LANTERN_CHECK_LOADED void* ret = _lantern__assert_async_tensor_cstringview(self, assert_msg); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__assert_scalar_scalar_cstringview)(void* self, void* assert_msg); + HOST_API void* lantern__assert_scalar_scalar_cstringview(void* self, void* assert_msg) { LANTERN_CHECK_LOADED void* ret = _lantern__assert_scalar_scalar_cstringview(self, assert_msg); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__functional_assert_scalar_scalar_cstringview_tensor)(void* self, void* assert_msg, void* dep_token); + HOST_API void* lantern__functional_assert_scalar_scalar_cstringview_tensor(void* self, void* assert_msg, void* dep_token) { LANTERN_CHECK_LOADED void* ret = _lantern__functional_assert_scalar_scalar_cstringview_tensor(self, assert_msg, dep_token); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__functional_assert_async_tensor_cstringview_tensor)(void* self, void* assert_msg, void* dep_token); + HOST_API void* lantern__functional_assert_async_tensor_cstringview_tensor(void* self, void* assert_msg, void* dep_token) { LANTERN_CHECK_LOADED void* ret = _lantern__functional_assert_async_tensor_cstringview_tensor(self, assert_msg, dep_token); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__assert_tensor_metadata_tensor_intarrayref_intarrayref_scalartype)(void* a, void* size, void* stride, void* dtype); HOST_API void* lantern__assert_tensor_metadata_tensor_intarrayref_intarrayref_scalartype(void* a, void* size, void* stride, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__assert_tensor_metadata_tensor_intarrayref_intarrayref_scalartype(a, size, stride, dtype); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__print_cstringview)(void* s); + HOST_API void* 
@@ -2658,8 +2660,28 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
LANTERN_API void* (LANTERN_PTR _lantern_align_tensors_tensorlist)(void* tensors);
HOST_API void* lantern_align_tensors_tensorlist(void* tensors) { LANTERN_CHECK_LOADED void* ret = _lantern_align_tensors_tensorlist(tensors); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__assert_async_tensor)(void* self);
HOST_API void* lantern__assert_async_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__assert_async_tensor(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__assert_async_tensor_cstringview)(void* self, void* assert_msg);
+ HOST_API void* lantern__assert_async_tensor_cstringview(void* self, void* assert_msg) { LANTERN_CHECK_LOADED void* ret = _lantern__assert_async_tensor_cstringview(self, assert_msg); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__assert_scalar_scalar_cstringview)(void* self, void* assert_msg);
+ HOST_API void* lantern__assert_scalar_scalar_cstringview(void* self, void* assert_msg) { LANTERN_CHECK_LOADED void* ret = _lantern__assert_scalar_scalar_cstringview(self, assert_msg); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__functional_assert_scalar_scalar_cstringview_tensor)(void* self, void* assert_msg, void* dep_token);
+ HOST_API void* lantern__functional_assert_scalar_scalar_cstringview_tensor(void* self, void* assert_msg, void* dep_token) { LANTERN_CHECK_LOADED void* ret = _lantern__functional_assert_scalar_scalar_cstringview_tensor(self, assert_msg, dep_token); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__functional_assert_async_tensor_cstringview_tensor)(void* self, void* assert_msg, void* dep_token);
+ HOST_API void* lantern__functional_assert_async_tensor_cstringview_tensor(void* self, void* assert_msg, void* dep_token) { LANTERN_CHECK_LOADED void* ret = _lantern__functional_assert_async_tensor_cstringview_tensor(self, assert_msg, dep_token); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__assert_tensor_metadata_tensor_intarrayref_intarrayref_scalartype)(void* a, void* size, void* stride, void* dtype);
HOST_API void* lantern__assert_tensor_metadata_tensor_intarrayref_intarrayref_scalartype(void* a, void* size, void* stride, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__assert_tensor_metadata_tensor_intarrayref_intarrayref_scalartype(a, size, stride, dtype); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__print_cstringview)(void* s);
+ HOST_API void* lantern__print_cstringview(void* s) { LANTERN_CHECK_LOADED void* ret = _lantern__print_cstringview(s); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_sym_constrain_range_scalar_intt_intt)(void* size, void* min, void* max);
+ HOST_API void* lantern_sym_constrain_range_scalar_intt_intt(void* size, void* min, void* max) { LANTERN_CHECK_LOADED void* ret = _lantern_sym_constrain_range_scalar_intt_intt(size, min, max); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_sym_constrain_range_for_size_scalar_intt_intt)(void* size, void* min, void* max);
+ HOST_API void* lantern_sym_constrain_range_for_size_scalar_intt_intt(void* size, void* min, void* max) { LANTERN_CHECK_LOADED void* ret = _lantern_sym_constrain_range_for_size_scalar_intt_intt(size, min, max); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__functional_sym_constrain_range_scalar_intt_intt_tensor)(void* size, void* min, void* max, void* dep_token);
+ HOST_API void* lantern__functional_sym_constrain_range_scalar_intt_intt_tensor(void* size, void* min, void* max, void* dep_token) { LANTERN_CHECK_LOADED void* ret = _lantern__functional_sym_constrain_range_scalar_intt_intt_tensor(size, min, max, dep_token); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__functional_sym_constrain_range_for_size_scalar_intt_intt_tensor)(void* size, void* min, void* max, void* dep_token);
+ HOST_API void* lantern__functional_sym_constrain_range_for_size_scalar_intt_intt_tensor(void* size, void* min, void* max, void* dep_token) { LANTERN_CHECK_LOADED void* ret = _lantern__functional_sym_constrain_range_for_size_scalar_intt_intt_tensor(size, min, max, dep_token); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__make_dep_token_tensoroptions_memoryformat)(void* options, void* memory_format);
+ HOST_API void* lantern__make_dep_token_tensoroptions_memoryformat(void* options, void* memory_format) { LANTERN_CHECK_LOADED void* ret = _lantern__make_dep_token_tensoroptions_memoryformat(options, memory_format); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_refine_names_tensor_dimnamelist)(void* self, void* names);
HOST_API void* lantern_Tensor_refine_names_tensor_dimnamelist(void* self, void* names) { void* ret = _lantern_Tensor_refine_names_tensor_dimnamelist(self, names); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__use_cudnn_ctc_loss_tensor_tensor_intarrayref_intarrayref_intt)(void* log_probs, void* targets, void* input_lengths, void* target_lengths, void* blank);
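Everything below follows one two-line pattern: a LANTERN_PTR function-pointer declaration named _lantern_*, which the host resolves from the lantern shared library at load time, and an inline HOST_API wrapper that verifies the library is loaded (LANTERN_CHECK_LOADED), forwards the call, and routes failures through LANTERN_HOST_HANDLER. A reduced, hedged sketch of that indirection for a single symbol; lanternInit and the inline guards are stand-ins for lantern's real loader, shown with POSIX dlopen (LoadLibrary/GetProcAddress on Windows):

#include <dlfcn.h>

void* (*_lantern_torch_show_config)() = nullptr; // resolved at load time

bool lanternInit(const char* path) {
  void* handle = dlopen(path, RTLD_LAZY);
  if (handle == nullptr) return false;
  _lantern_torch_show_config = reinterpret_cast<void* (*)()>(
      dlsym(handle, "_lantern_torch_show_config"));
  return _lantern_torch_show_config != nullptr;
}

// Shape of a HOST_API wrapper: guard, forward, hand errors back to the host.
void* lantern_torch_show_config() {
  if (_lantern_torch_show_config == nullptr) return nullptr; // LANTERN_CHECK_LOADED stand-in
  return _lantern_torch_show_config();                       // LANTERN_HOST_HANDLER elided
}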
@@ -2876,12 +2898,20 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_Tensor__is_any_true_tensor(void* self) { void* ret = _lantern_Tensor__is_any_true_tensor(self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__test_check_tensor_tensor)(void* self);
HOST_API void* lantern__test_check_tensor_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__test_check_tensor_tensor(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__test_functorch_fallback_tensor_tensor)(void* self, void* other);
+ HOST_API void* lantern__test_functorch_fallback_tensor_tensor(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__test_functorch_fallback_tensor_tensor(self, other); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_all_tensor_intt_bool)(void* self, void* dim, void* keepdim);
HOST_API void* lantern_all_tensor_intt_bool(void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_all_tensor_intt_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_all_tensor_intt_bool)(void* self, void* dim, void* keepdim);
HOST_API void* lantern_Tensor_all_tensor_intt_bool(void* self, void* dim, void* keepdim) { void* ret = _lantern_Tensor_all_tensor_intt_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_all_tensor_intarrayref_bool)(void* self, void* dim, void* keepdim);
+ HOST_API void* lantern_all_tensor_intarrayref_bool(void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_all_tensor_intarrayref_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_Tensor_all_tensor_intarrayref_bool)(void* self, void* dim, void* keepdim);
+ HOST_API void* lantern_Tensor_all_tensor_intarrayref_bool(void* self, void* dim, void* keepdim) { void* ret = _lantern_Tensor_all_tensor_intarrayref_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_all_out_tensor_tensor_intt_bool)(void* out, void* self, void* dim, void* keepdim);
HOST_API void* lantern_all_out_tensor_tensor_intt_bool(void* out, void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_all_out_tensor_tensor_intt_bool(out, self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_all_out_tensor_tensor_intarrayref_bool)(void* out, void* self, void* dim, void* keepdim);
+ HOST_API void* lantern_all_out_tensor_tensor_intarrayref_bool(void* out, void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_all_out_tensor_tensor_intarrayref_bool(out, self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_all_tensor_dimname_bool)(void* self, void* dim, void* keepdim);
HOST_API void* lantern_all_tensor_dimname_bool(void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_all_tensor_dimname_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_all_tensor_dimname_bool)(void* self, void* dim, void* keepdim);
@@ -2896,8 +2926,14 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_any_tensor_intt_bool(void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_any_tensor_intt_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_any_tensor_intt_bool)(void* self, void* dim, void* keepdim);
HOST_API void* lantern_Tensor_any_tensor_intt_bool(void* self, void* dim, void* keepdim) { void* ret = _lantern_Tensor_any_tensor_intt_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_any_tensor_intarrayref_bool)(void* self, void* dim, void* keepdim);
+ HOST_API void* lantern_any_tensor_intarrayref_bool(void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_any_tensor_intarrayref_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_Tensor_any_tensor_intarrayref_bool)(void* self, void* dim, void* keepdim);
+ HOST_API void* lantern_Tensor_any_tensor_intarrayref_bool(void* self, void* dim, void* keepdim) { void* ret = _lantern_Tensor_any_tensor_intarrayref_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_any_out_tensor_tensor_intt_bool)(void* out, void* self, void* dim, void* keepdim);
HOST_API void* lantern_any_out_tensor_tensor_intt_bool(void* out, void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_any_out_tensor_tensor_intt_bool(out, self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_any_out_tensor_tensor_intarrayref_bool)(void* out, void* self, void* dim, void* keepdim);
+ HOST_API void* lantern_any_out_tensor_tensor_intarrayref_bool(void* out, void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_any_out_tensor_tensor_intarrayref_bool(out, self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_any_tensor_dimname_bool)(void* self, void* dim, void* keepdim);
HOST_API void* lantern_any_tensor_dimname_bool(void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_any_tensor_dimname_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_any_tensor_dimname_bool)(void* self, void* dim, void* keepdim);
@@ -3122,6 +3158,10 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_Tensor_copysign__tensor_scalar(void* self, void* other) { void* ret = _lantern_Tensor_copysign__tensor_scalar(self, other); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_copysign_out_tensor_tensor_scalar)(void* out, void* self, void* other);
HOST_API void* lantern_copysign_out_tensor_tensor_scalar(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern_copysign_out_tensor_tensor_scalar(out, self, other); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__lazy_clone_tensor)(void* self);
+ HOST_API void* lantern__lazy_clone_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__lazy_clone_tensor(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_Tensor__lazy_clone_tensor)(void* self);
+ HOST_API void* lantern_Tensor__lazy_clone_tensor(void* self) { void* ret = _lantern_Tensor__lazy_clone_tensor(self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_logical_not_tensor)(void* self);
HOST_API void* lantern_logical_not_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_logical_not_tensor(self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_logical_not_tensor)(void* self);
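A note on naming, since the rest of the file reads this way: the suffix of each _lantern_* symbol spells out the bound ATen signature (_lantern_all_tensor_intarrayref_bool is all(Tensor, IntArrayRef, bool)), a Tensor_ prefix marks the method variant, and _out entries take the destination tensor first. The intarrayref overloads of all/any added above track ATen's all.dims/any.dims, which reduce over several dimensions in one call. A hedged libtorch-level illustration; exact overload sets depend on the libtorch version:

#include <ATen/ATen.h>

void reduce_demo(const at::Tensor& x) {
  // all.dim  -> _lantern_all_tensor_intt_bool
  at::Tensor one_dim = at::all(x, /*dim=*/0, /*keepdim=*/false);

  // all.dims -> _lantern_all_tensor_intarrayref_bool (added in this diff)
  const int64_t dims[] = {0, 1};
  at::Tensor two_dims = at::all(x, at::IntArrayRef(dims), /*keepdim=*/false);

  (void)one_dim;
  (void)two_dims;
}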
@@ -3420,6 +3460,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_cudnn_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_tensor(void* input, void* grad_output, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_var, void* epsilon, void* reserveSpace) { LANTERN_CHECK_LOADED void* ret = _lantern_cudnn_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_tensor(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_cudnn_convolution_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)(void* self, void* weight, void* padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32);
HOST_API void* lantern_cudnn_convolution_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(void* self, void* weight, void* padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32) { LANTERN_CHECK_LOADED void* ret = _lantern_cudnn_convolution_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)(void* out, void* self, void* weight, void* padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32);
+ HOST_API void* lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(void* out, void* self, void* weight, void* padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32) { LANTERN_CHECK_LOADED void* ret = _lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(out, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_cudnn_convolution_transpose_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)(void* self, void* weight, void* padding, void* output_padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32);
HOST_API void* lantern_cudnn_convolution_transpose_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(void* self, void* weight, void* padding, void* output_padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32) { LANTERN_CHECK_LOADED void* ret = _lantern_cudnn_convolution_transpose_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__mps_convolution_transpose_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt)(void* self, void* weight, void* padding, void* output_padding, void* stride, void* dilation, void* groups);
@@ -3676,6 +3718,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_empty_intarrayref_dimnamelist_tensoroptions_memoryformat(void* size, void* names, void* options, void* memory_format) { LANTERN_CHECK_LOADED void* ret = _lantern_empty_intarrayref_dimnamelist_tensoroptions_memoryformat(size, names, options, memory_format); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_empty_intarrayref_tensoroptions_memoryformat)(void* size, void* options, void* memory_format);
HOST_API void* lantern_empty_intarrayref_tensoroptions_memoryformat(void* size, void* options, void* memory_format) { LANTERN_CHECK_LOADED void* ret = _lantern_empty_intarrayref_tensoroptions_memoryformat(size, options, memory_format); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_empty_permuted_intarrayref_intarrayref_tensoroptions)(void* size, void* physical_layout, void* options);
+ HOST_API void* lantern_empty_permuted_intarrayref_intarrayref_tensoroptions(void* size, void* physical_layout, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_empty_permuted_intarrayref_intarrayref_tensoroptions(size, physical_layout, options); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_new_empty_tensor_intarrayref_tensoroptions)(void* self, void* size, void* options);
HOST_API void* lantern_Tensor_new_empty_tensor_intarrayref_tensoroptions(void* self, void* size, void* options) { void* ret = _lantern_Tensor_new_empty_tensor_intarrayref_tensoroptions(self, size, options); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_new_empty_strided_tensor_intarrayref_intarrayref_tensoroptions)(void* self, void* size, void* stride, void* options);
@@ -3918,20 +3962,26 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern__fft_c2c_out_tensor_tensor_intarrayref_intt_bool(void* out, void* self, void* dim, void* normalization, void* forward) { LANTERN_CHECK_LOADED void* ret = _lantern__fft_c2c_out_tensor_tensor_intarrayref_intt_bool(out, self, dim, normalization, forward); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__validate_compressed_sparse_indices_bool_tensor_tensor_intt_intt_intt)(void* is_crow, void* compressed_idx, void* plain_idx, void* cdim, void* dim, void* nnz);
HOST_API void* lantern__validate_compressed_sparse_indices_bool_tensor_tensor_intt_intt_intt(void* is_crow, void* compressed_idx, void* plain_idx, void* cdim, void* dim, void* nnz) { LANTERN_CHECK_LOADED void* ret = _lantern__validate_compressed_sparse_indices_bool_tensor_tensor_intt_intt_intt(is_crow, compressed_idx, plain_idx, cdim, dim, nnz); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__cufft_get_plan_cache_size_intt)(void* device_index);
- HOST_API void* lantern__cufft_get_plan_cache_size_intt(void* device_index) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_get_plan_cache_size_intt(device_index); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__cufft_get_plan_cache_max_size_intt)(void* device_index);
- HOST_API void* lantern__cufft_get_plan_cache_max_size_intt(void* device_index) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_get_plan_cache_max_size_intt(device_index); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__cufft_set_plan_cache_max_size_intt_intt)(void* device_index, void* max_size);
- HOST_API void* lantern__cufft_set_plan_cache_max_size_intt_intt(void* device_index, void* max_size) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_set_plan_cache_max_size_intt_intt(device_index, max_size); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__cufft_clear_plan_cache_intt)(void* device_index);
- HOST_API void* lantern__cufft_clear_plan_cache_intt(void* device_index) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_clear_plan_cache_intt(device_index); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_index_tensor_constclistcoptionaltensor)(void* self, void* indices);
- HOST_API void* lantern_index_tensor_constclistcoptionaltensor(void* self, void* indices) { LANTERN_CHECK_LOADED void* ret = _lantern_index_tensor_constclistcoptionaltensor(self, indices); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_tensor_constclistcoptionaltensor)(void* self, void* indices);
- HOST_API void* lantern_Tensor_index_tensor_constclistcoptionaltensor(void* self, void* indices) { void* ret = _lantern_Tensor_index_tensor_constclistcoptionaltensor(self, indices); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_index_out_tensor_tensor_constclistcoptionaltensor)(void* out, void* self, void* indices);
- HOST_API void* lantern_index_out_tensor_tensor_constclistcoptionaltensor(void* out, void* self, void* indices) { LANTERN_CHECK_LOADED void* ret = _lantern_index_out_tensor_tensor_constclistcoptionaltensor(out, self, indices); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__cufft_get_plan_cache_size_deviceindex)(void* device_index);
+ HOST_API void* lantern__cufft_get_plan_cache_size_deviceindex(void* device_index) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_get_plan_cache_size_deviceindex(device_index); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__cufft_get_plan_cache_max_size_deviceindex)(void* device_index);
+ HOST_API void* lantern__cufft_get_plan_cache_max_size_deviceindex(void* device_index) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_get_plan_cache_max_size_deviceindex(device_index); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__cufft_set_plan_cache_max_size_deviceindex_intt)(void* device_index, void* max_size);
+ HOST_API void* lantern__cufft_set_plan_cache_max_size_deviceindex_intt(void* device_index, void* max_size) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_set_plan_cache_max_size_deviceindex_intt(device_index, max_size); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__cufft_clear_plan_cache_deviceindex)(void* device_index);
+ HOST_API void* lantern__cufft_clear_plan_cache_deviceindex(void* device_index) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_clear_plan_cache_deviceindex(device_index); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_index_tensor_constcliststdoptionaltensor)(void* self, void* indices);
+ HOST_API void* lantern_index_tensor_constcliststdoptionaltensor(void* self, void* indices) { LANTERN_CHECK_LOADED void* ret = _lantern_index_tensor_constcliststdoptionaltensor(self, indices); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_tensor_constcliststdoptionaltensor)(void* self, void* indices);
+ HOST_API void* lantern_Tensor_index_tensor_constcliststdoptionaltensor(void* self, void* indices) { void* ret = _lantern_Tensor_index_tensor_constcliststdoptionaltensor(self, indices); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_index_out_tensor_tensor_constcliststdoptionaltensor)(void* out, void* self, void* indices);
+ HOST_API void* lantern_index_out_tensor_tensor_constcliststdoptionaltensor(void* out, void* self, void* indices) { LANTERN_CHECK_LOADED void* ret = _lantern_index_out_tensor_tensor_constcliststdoptionaltensor(out, self, indices); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__unsafe_index_tensor_constcliststdoptionaltensor)(void* self, void* indices);
+ HOST_API void* lantern__unsafe_index_tensor_constcliststdoptionaltensor(void* self, void* indices) { LANTERN_CHECK_LOADED void* ret = _lantern__unsafe_index_tensor_constcliststdoptionaltensor(self, indices); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__unsafe_masked_index_tensor_tensor_constcliststdoptionaltensor_scalar)(void* self, void* mask, void* indices, void* fill);
+ HOST_API void* lantern__unsafe_masked_index_tensor_tensor_constcliststdoptionaltensor_scalar(void* self, void* mask, void* indices, void* fill) { LANTERN_CHECK_LOADED void* ret = _lantern__unsafe_masked_index_tensor_tensor_constcliststdoptionaltensor_scalar(self, mask, indices, fill); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__unsafe_masked_index_put_accumulate_tensor_tensor_constcliststdoptionaltensor_tensor)(void* self, void* mask, void* indices, void* values);
+ HOST_API void* lantern__unsafe_masked_index_put_accumulate_tensor_tensor_constcliststdoptionaltensor_tensor(void* self, void* mask, void* indices, void* values) { LANTERN_CHECK_LOADED void* ret = _lantern__unsafe_masked_index_put_accumulate_tensor_tensor_constcliststdoptionaltensor_tensor(self, mask, indices, values); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_index_copy_out_tensor_tensor_intt_tensor_tensor)(void* out, void* self, void* dim, void* index, void* source);
HOST_API void* lantern_index_copy_out_tensor_tensor_intt_tensor_tensor(void* out, void* self, void* dim, void* index, void* source) { LANTERN_CHECK_LOADED void* ret = _lantern_index_copy_out_tensor_tensor_intt_tensor_tensor(out, self, dim, index, source); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_copy__tensor_intt_tensor_tensor)(void* self, void* dim, void* index, void* source);
@@ -3946,16 +3996,18 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_index_copy_tensor_dimname_tensor_tensor(void* self, void* dim, void* index, void* source) { LANTERN_CHECK_LOADED void* ret = _lantern_index_copy_tensor_dimname_tensor_tensor(self, dim, index, source); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_copy_tensor_dimname_tensor_tensor)(void* self, void* dim, void* index, void* source);
HOST_API void* lantern_Tensor_index_copy_tensor_dimname_tensor_tensor(void* self, void* dim, void* index, void* source) { void* ret = _lantern_Tensor_index_copy_tensor_dimname_tensor_tensor(self, dim, index, source); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_index_put__tensor_constclistcoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
- HOST_API void* lantern_index_put__tensor_constclistcoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { LANTERN_CHECK_LOADED void* ret = _lantern_index_put__tensor_constclistcoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_put__tensor_constclistcoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
- HOST_API void* lantern_Tensor_index_put__tensor_constclistcoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { void* ret = _lantern_Tensor_index_put__tensor_constclistcoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_index_put_tensor_constclistcoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
- HOST_API void* lantern_index_put_tensor_constclistcoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { LANTERN_CHECK_LOADED void* ret = _lantern_index_put_tensor_constclistcoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_put_tensor_constclistcoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
- HOST_API void* lantern_Tensor_index_put_tensor_constclistcoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { void* ret = _lantern_Tensor_index_put_tensor_constclistcoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__index_put_impl__tensor_constclistcoptionaltensor_tensor_bool_bool)(void* self, void* indices, void* values, void* accumulate, void* unsafe);
- HOST_API void* lantern__index_put_impl__tensor_constclistcoptionaltensor_tensor_bool_bool(void* self, void* indices, void* values, void* accumulate, void* unsafe) { LANTERN_CHECK_LOADED void* ret = _lantern__index_put_impl__tensor_constclistcoptionaltensor_tensor_bool_bool(self, indices, values, accumulate, unsafe); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_index_put__tensor_constcliststdoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
+ HOST_API void* lantern_index_put__tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { LANTERN_CHECK_LOADED void* ret = _lantern_index_put__tensor_constcliststdoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_put__tensor_constcliststdoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
+ HOST_API void* lantern_Tensor_index_put__tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { void* ret = _lantern_Tensor_index_put__tensor_constcliststdoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_index_put_tensor_constcliststdoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
+ HOST_API void* lantern_index_put_tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { LANTERN_CHECK_LOADED void* ret = _lantern_index_put_tensor_constcliststdoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_put_tensor_constcliststdoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
+ HOST_API void* lantern_Tensor_index_put_tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { void* ret = _lantern_Tensor_index_put_tensor_constcliststdoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__unsafe_index_put_tensor_constcliststdoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
+ HOST_API void* lantern__unsafe_index_put_tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { LANTERN_CHECK_LOADED void* ret = _lantern__unsafe_index_put_tensor_constcliststdoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
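The block above is a pure suffix rename, constclistcoptionaltensor to constcliststdoptionaltensor, tracking PyTorch's migration from c10::optional to std::optional in the index/index_put schemas: the indices argument is now a const c10::List<std::optional<Tensor>>&. A hedged sketch of building such an index list against a recent libtorch (older releases spell the element type c10::optional<at::Tensor>):

#include <ATen/ATen.h>
#include <optional>

// Equivalent of the Python expression x[rows, :]: one optional entry per
// leading dimension, with std::nullopt meaning "keep this dimension whole".
at::Tensor take_rows(const at::Tensor& x, const at::Tensor& rows) {
  c10::List<std::optional<at::Tensor>> indices;
  indices.push_back(std::optional<at::Tensor>(rows));
  indices.push_back(std::nullopt);
  return at::index(x, indices);
}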
+ LANTERN_API void* (LANTERN_PTR _lantern__index_put_impl__tensor_constcliststdoptionaltensor_tensor_bool_bool)(void* self, void* indices, void* values, void* accumulate, void* unsafe);
+ HOST_API void* lantern__index_put_impl__tensor_constcliststdoptionaltensor_tensor_bool_bool(void* self, void* indices, void* values, void* accumulate, void* unsafe) { LANTERN_CHECK_LOADED void* ret = _lantern__index_put_impl__tensor_constcliststdoptionaltensor_tensor_bool_bool(self, indices, values, accumulate, unsafe); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_instance_norm_tensor_tensor_tensor_tensor_tensor_bool_double_double_bool)(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* use_input_stats, void* momentum, void* eps, void* cudnn_enabled);
HOST_API void* lantern_instance_norm_tensor_tensor_tensor_tensor_tensor_bool_double_double_bool(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* use_input_stats, void* momentum, void* eps, void* cudnn_enabled) { LANTERN_CHECK_LOADED void* ret = _lantern_instance_norm_tensor_tensor_tensor_tensor_tensor_bool_double_double_bool(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_isclose_tensor_tensor_double_double_bool)(void* self, void* other, void* rtol, void* atol, void* equal_nan);
@@ -4048,6 +4100,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_native_layer_norm_tensor_intarrayref_tensor_tensor_double(void* input, void* normalized_shape, void* weight, void* bias, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern_native_layer_norm_tensor_intarrayref_tensor_tensor_double(input, normalized_shape, weight, bias, eps); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_native_layer_norm_backward_tensor_tensor_intarrayref_tensor_tensor_tensor_tensor_stdarraybool)(void* grad_out, void* input, void* normalized_shape, void* mean, void* rstd, void* weight, void* bias, void* output_mask);
HOST_API void* lantern_native_layer_norm_backward_tensor_tensor_intarrayref_tensor_tensor_tensor_tensor_stdarraybool(void* grad_out, void* input, void* normalized_shape, void* mean, void* rstd, void* weight, void* bias, void* output_mask) { LANTERN_CHECK_LOADED void* ret = _lantern_native_layer_norm_backward_tensor_tensor_intarrayref_tensor_tensor_tensor_tensor_stdarraybool(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_rms_norm_tensor_intarrayref_tensor_double)(void* input, void* normalized_shape, void* weight, void* eps);
+ HOST_API void* lantern_rms_norm_tensor_intarrayref_tensor_double(void* input, void* normalized_shape, void* weight, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern_rms_norm_tensor_intarrayref_tensor_double(input, normalized_shape, weight, eps); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_nan_to_num_tensor_double_double_double)(void* self, void* nan, void* posinf, void* neginf);
HOST_API void* lantern_nan_to_num_tensor_double_double_double(void* self, void* nan, void* posinf, void* neginf) { LANTERN_CHECK_LOADED void* ret = _lantern_nan_to_num_tensor_double_double_double(self, nan, posinf, neginf); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_nan_to_num_tensor_double_double_double)(void* self, void* nan, void* posinf, void* neginf);
@@ -4072,6 +4126,26 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_mkldnn_linear_backward_weights_tensor_tensor_tensor_bool(void* grad_output, void* input, void* weight, void* bias_defined) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_linear_backward_weights_tensor_tensor_tensor_bool(grad_output, input, weight, bias_defined); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_mkldnn_linear_backward_tensor_tensor_tensor_stdarraybool)(void* self, void* grad_output, void* weight, void* output_mask);
HOST_API void* lantern_mkldnn_linear_backward_tensor_tensor_tensor_stdarraybool(void* self, void* grad_output, void* weight, void* output_mask) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_linear_backward_tensor_tensor_tensor_stdarraybool(self, grad_output, weight, output_mask); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__cslt_compress_tensor)(void* input);
+ HOST_API void* lantern__cslt_compress_tensor(void* input) { LANTERN_CHECK_LOADED void* ret = _lantern__cslt_compress_tensor(input); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__cslt_sparse_mm_tensor_tensor_tensor_tensor_scalartype_bool_intt)(void* compressed_A, void* dense_B, void* bias, void* alpha, void* out_dtype, void* transpose_result, void* alg_id);
+ HOST_API void* lantern__cslt_sparse_mm_tensor_tensor_tensor_tensor_scalartype_bool_intt(void* compressed_A, void* dense_B, void* bias, void* alpha, void* out_dtype, void* transpose_result, void* alg_id) { LANTERN_CHECK_LOADED void* ret = _lantern__cslt_sparse_mm_tensor_tensor_tensor_tensor_scalartype_bool_intt(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result, alg_id); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__cslt_sparse_mm_search_tensor_tensor_tensor_tensor_scalartype_bool)(void* compressed_A, void* dense_B, void* bias, void* alpha, void* out_dtype, void* transpose_result);
+ HOST_API void* lantern__cslt_sparse_mm_search_tensor_tensor_tensor_tensor_scalartype_bool(void* compressed_A, void* dense_B, void* bias, void* alpha, void* out_dtype, void* transpose_result) { LANTERN_CHECK_LOADED void* ret = _lantern__cslt_sparse_mm_search_tensor_tensor_tensor_tensor_scalartype_bool(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__sparse_semi_structured_tile_tensor_cstringview_bool)(void* input, void* algorithm, void* use_cutlass);
+ HOST_API void* lantern__sparse_semi_structured_tile_tensor_cstringview_bool(void* input, void* algorithm, void* use_cutlass) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_semi_structured_tile_tensor_cstringview_bool(input, algorithm, use_cutlass); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__sparse_semi_structured_apply_tensor_tensor)(void* input, void* thread_masks);
+ HOST_API void* lantern__sparse_semi_structured_apply_tensor_tensor(void* input, void* thread_masks) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_semi_structured_apply_tensor_tensor(input, thread_masks); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__sparse_semi_structured_apply_dense_tensor_tensor)(void* input, void* thread_masks);
+ HOST_API void* lantern__sparse_semi_structured_apply_dense_tensor_tensor(void* input, void* thread_masks) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_semi_structured_apply_dense_tensor_tensor(input, thread_masks); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__sparse_semi_structured_linear_tensor_tensor_tensor_tensor_cstringview_scalartype)(void* input, void* weight, void* meta, void* bias, void* activation, void* out_dtype);
+ HOST_API void* lantern__sparse_semi_structured_linear_tensor_tensor_tensor_tensor_cstringview_scalartype(void* input, void* weight, void* meta, void* bias, void* activation, void* out_dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_semi_structured_linear_tensor_tensor_tensor_tensor_cstringview_scalartype(input, weight, meta, bias, activation, out_dtype); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__sparse_semi_structured_mm_tensor_tensor_tensor_scalartype)(void* mat1, void* mat1_meta, void* mat2, void* out_dtype);
+ HOST_API void* lantern__sparse_semi_structured_mm_tensor_tensor_tensor_scalartype(void* mat1, void* mat1_meta, void* mat2, void* out_dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_semi_structured_mm_tensor_tensor_tensor_scalartype(mat1, mat1_meta, mat2, out_dtype); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__sparse_semi_structured_addmm_tensor_tensor_tensor_tensor_scalar_scalar_scalartype)(void* input, void* mat1, void* mat1_meta, void* mat2, void* alpha, void* beta, void* out_dtype);
+ HOST_API void* lantern__sparse_semi_structured_addmm_tensor_tensor_tensor_tensor_scalar_scalar_scalartype(void* input, void* mat1, void* mat1_meta, void* mat2, void* alpha, void* beta, void* out_dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_semi_structured_addmm_tensor_tensor_tensor_tensor_scalar_scalar_scalartype(input, mat1, mat1_meta, mat2, alpha, beta, out_dtype); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__mixed_dtypes_linear_tensor_tensor_tensor_tensor_cstringview)(void* input, void* weight, void* scale, void* bias, void* activation);
+ HOST_API void* lantern__mixed_dtypes_linear_tensor_tensor_tensor_tensor_cstringview(void* input, void* weight, void* scale, void* bias, void* activation) { LANTERN_CHECK_LOADED void* ret = _lantern__mixed_dtypes_linear_tensor_tensor_tensor_tensor_cstringview(input, weight, scale, bias, activation); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_fbgemm_linear_int8_weight_fp32_activation_tensor_tensor_tensor_tensor_scalar_scalar_tensor)(void* input, void* weight, void* packed, void* col_offsets, void* weight_scale, void* weight_zero_point, void* bias);
HOST_API void* lantern_fbgemm_linear_int8_weight_fp32_activation_tensor_tensor_tensor_tensor_scalar_scalar_tensor(void* input, void* weight, void* packed, void* col_offsets, void* weight_scale, void* weight_zero_point, void* bias) { LANTERN_CHECK_LOADED void* ret = _lantern_fbgemm_linear_int8_weight_fp32_activation_tensor_tensor_tensor_tensor_scalar_scalar_tensor(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_fbgemm_linear_int8_weight_tensor_tensor_tensor_tensor_scalar_scalar_tensor)(void* input, void* weight, void* packed, void* col_offsets, void* weight_scale, void* weight_zero_point, void* bias);
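The _cslt_* and _sparse_semi_structured_* entries added above expose PyTorch's cuSPARSELt-backed 2:4 semi-structured sparsity kernels, and _mixed_dtypes_linear its mixed-precision linear path. A hedged usage sketch at the ATen level; these are private operators, present only on CUDA builds compiled with cuSPARSELt support, and their signatures may shift between releases:

#include <ATen/ATen.h>

// w24: a fp16/bf16 weight already pruned to the 2:4 sparsity pattern.
at::Tensor semi_structured_mm(const at::Tensor& w24, const at::Tensor& dense) {
  at::Tensor compressed = at::_cslt_compress(w24); // pack values plus metadata
  return at::_cslt_sparse_mm(compressed, dense);   // optional args left at defaults
}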
@@ -4080,6 +4154,10 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_fbgemm_linear_quantize_weight_tensor(void* input) { LANTERN_CHECK_LOADED void* ret = _lantern_fbgemm_linear_quantize_weight_tensor(input); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_fbgemm_pack_gemm_matrix_fp16_tensor)(void* input);
HOST_API void* lantern_fbgemm_pack_gemm_matrix_fp16_tensor(void* input) { LANTERN_CHECK_LOADED void* ret = _lantern_fbgemm_pack_gemm_matrix_fp16_tensor(input); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__wrapped_linear_prepack_tensor_tensor_tensor_tensor)(void* weight, void* weight_scale, void* weight_zero_point, void* bias);
+ HOST_API void* lantern__wrapped_linear_prepack_tensor_tensor_tensor_tensor(void* weight, void* weight_scale, void* weight_zero_point, void* bias) { LANTERN_CHECK_LOADED void* ret = _lantern__wrapped_linear_prepack_tensor_tensor_tensor_tensor(weight, weight_scale, weight_zero_point, bias); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__wrapped_quantized_linear_prepacked_tensor_tensor_tensor_tensor_tensor_tensor_intt)(void* input, void* input_scale, void* input_zero_point, void* packed_weight, void* output_scale, void* output_zero_point, void* out_channel);
+ HOST_API void* lantern__wrapped_quantized_linear_prepacked_tensor_tensor_tensor_tensor_tensor_tensor_intt(void* input, void* input_scale, void* input_zero_point, void* packed_weight, void* output_scale, void* output_zero_point, void* out_channel) { LANTERN_CHECK_LOADED void* ret = _lantern__wrapped_quantized_linear_prepacked_tensor_tensor_tensor_tensor_tensor_tensor_intt(input, input_scale, input_zero_point, packed_weight, output_scale, output_zero_point, out_channel); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_fbgemm_linear_fp16_weight_fp32_activation_tensor_tensor_tensor)(void* input, void* packed_weight, void* bias);
HOST_API void* lantern_fbgemm_linear_fp16_weight_fp32_activation_tensor_tensor_tensor(void* input, void* packed_weight, void* bias) { LANTERN_CHECK_LOADED void* ret = _lantern_fbgemm_linear_fp16_weight_fp32_activation_tensor_tensor_tensor(input, packed_weight, bias); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_fbgemm_linear_fp16_weight_tensor_tensor_tensor)(void* input, void* packed_weight, void* bias);
@@ -4100,8 +4178,20 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_ldexp_out_tensor_tensor_tensor(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern_ldexp_out_tensor_tensor_tensor(out, self, other); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_linspace_scalar_scalar_intt_tensoroptions)(void* start, void* end, void* steps, void* options);
HOST_API void* lantern_linspace_scalar_scalar_intt_tensoroptions(void* start, void* end, void* steps, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_scalar_scalar_intt_tensoroptions(start, end, steps, options); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_linspace_tensor_tensor_intt_tensoroptions)(void* start, void* end, void* steps, void* options);
+ HOST_API void* lantern_linspace_tensor_tensor_intt_tensoroptions(void* start, void* end, void* steps, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_tensor_tensor_intt_tensoroptions(start, end, steps, options); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_linspace_tensor_scalar_intt_tensoroptions)(void* start, void* end, void* steps, void* options);
+ HOST_API void* lantern_linspace_tensor_scalar_intt_tensoroptions(void* start, void* end, void* steps, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_tensor_scalar_intt_tensoroptions(start, end, steps, options); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_linspace_scalar_tensor_intt_tensoroptions)(void* start, void* end, void* steps, void* options);
+ HOST_API void* lantern_linspace_scalar_tensor_intt_tensoroptions(void* start, void* end, void* steps, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_scalar_tensor_intt_tensoroptions(start, end, steps, options); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_linspace_out_tensor_scalar_scalar_intt)(void* out, void* start, void* end, void* steps);
HOST_API void* lantern_linspace_out_tensor_scalar_scalar_intt(void* out, void* start, void* end, void* steps) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_out_tensor_scalar_scalar_intt(out, start, end, steps); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_linspace_out_tensor_tensor_tensor_intt)(void* out, void* start, void* end, void* steps);
+ HOST_API void* lantern_linspace_out_tensor_tensor_tensor_intt(void* out, void* start, void* end, void* steps) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_out_tensor_tensor_tensor_intt(out, start, end, steps); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_linspace_out_tensor_tensor_scalar_intt)(void* out, void* start, void* end, void* steps);
+ HOST_API void* lantern_linspace_out_tensor_tensor_scalar_intt(void* out, void* start, void* end, void* steps) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_out_tensor_tensor_scalar_intt(out, start, end, steps); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_linspace_out_tensor_scalar_tensor_intt)(void* out, void* start, void* end, void* steps);
+ HOST_API void* lantern_linspace_out_tensor_scalar_tensor_intt(void* out, void* start, void* end, void* steps) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_out_tensor_scalar_tensor_intt(out, start, end, steps); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_log_tensor)(void* self);
HOST_API void* lantern_log_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_log_tensor(self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_log_tensor)(void* self);
@@ -4180,8 +4270,20 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_xlogy_out_tensor_tensor_scalar(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern_xlogy_out_tensor_tensor_scalar(out, self, other); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_logspace_scalar_scalar_intt_double_tensoroptions)(void* start, void* end, void* steps, void* base, void* options);
HOST_API void* lantern_logspace_scalar_scalar_intt_double_tensoroptions(void* start, void* end, void* steps, void* base, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_scalar_scalar_intt_double_tensoroptions(start, end, steps, base, options); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_logspace_tensor_tensor_intt_double_tensoroptions)(void* start, void* end, void* steps, void* base, void* options);
+ HOST_API void* lantern_logspace_tensor_tensor_intt_double_tensoroptions(void* start, void* end, void* steps, void* base, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_tensor_tensor_intt_double_tensoroptions(start, end, steps, base, options); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_logspace_tensor_scalar_intt_double_tensoroptions)(void* start, void* end, void* steps, void* base, void* options);
+ HOST_API void* lantern_logspace_tensor_scalar_intt_double_tensoroptions(void* start, void* end, void* steps, void* base, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_tensor_scalar_intt_double_tensoroptions(start, end, steps, base, options); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_logspace_scalar_tensor_intt_double_tensoroptions)(void* start, void* end, void* steps, void* base, void* options);
+ HOST_API void* lantern_logspace_scalar_tensor_intt_double_tensoroptions(void* start, void* end, void* steps, void* base, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_scalar_tensor_intt_double_tensoroptions(start, end, steps, base, options); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_logspace_out_tensor_scalar_scalar_intt_double)(void* out, void* start, void* end, void* steps, void* base);
HOST_API void* lantern_logspace_out_tensor_scalar_scalar_intt_double(void* out, void* start, void* end, void* steps, void* base) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_out_tensor_scalar_scalar_intt_double(out, start, end, steps, base); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_logspace_out_tensor_tensor_tensor_intt_double)(void* out, void* start, void* end, void* steps, void* base);
+ HOST_API void* lantern_logspace_out_tensor_tensor_tensor_intt_double(void* out, void* start, void* end, void* steps, void* base) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_out_tensor_tensor_tensor_intt_double(out, start, end, steps, base); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_logspace_out_tensor_tensor_scalar_intt_double)(void* out, void* start, void* end, void* steps, void* base);
+ HOST_API void* lantern_logspace_out_tensor_tensor_scalar_intt_double(void* out, void* start, void* end, void* steps, void* base) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_out_tensor_tensor_scalar_intt_double(out, start, end, steps, base); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_logspace_out_tensor_scalar_tensor_intt_double)(void* out, void* start, void* end, void* steps, void* base);
+ HOST_API void* lantern_logspace_out_tensor_scalar_tensor_intt_double(void* out, void* start, void* end, void* steps, void* base) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_out_tensor_scalar_tensor_intt_double(out, start, end, steps, base); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_log_softmax_tensor_intt_scalartype)(void* self, void* dim, void* dtype);
HOST_API void* lantern_log_softmax_tensor_intt_scalartype(void* self, void* dim, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_log_softmax_tensor_intt_scalartype(self, dim, dtype); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_log_softmax_tensor_intt_scalartype)(void* self, void* dim, void* dtype);
@@ -4304,12 +4406,16 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_quantized_max_pool1d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_CHECK_LOADED void* ret = _lantern_quantized_max_pool1d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(self, kernel_size, stride, padding, dilation, ceil_mode); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_quantized_max_pool2d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode);
HOST_API void* lantern_quantized_max_pool2d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_CHECK_LOADED void* ret = _lantern_quantized_max_pool2d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(self, kernel_size, stride, padding, dilation, ceil_mode); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_quantized_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode);
+ HOST_API void* lantern_quantized_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_CHECK_LOADED void* ret = _lantern_quantized_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(self, kernel_size, stride, padding, dilation, ceil_mode); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode);
HOST_API void* lantern_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_CHECK_LOADED void* ret = _lantern_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(self, kernel_size, stride, padding, dilation, ceil_mode); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_mean_tensor_scalartype)(void* self, void* dtype);
HOST_API void* lantern_mean_tensor_scalartype(void* self, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_mean_tensor_scalartype(self, dtype); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_mean_tensor_scalartype)(void* self, void* dtype);
HOST_API void* lantern_Tensor_mean_tensor_scalartype(void* self, void* dtype) { void* ret = _lantern_Tensor_mean_tensor_scalartype(self, dtype); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_mean_out_tensor_tensor_scalartype)(void* out, void* self, void* dtype);
+ HOST_API void* lantern_mean_out_tensor_tensor_scalartype(void* out, void* self, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_mean_out_tensor_tensor_scalartype(out, self, dtype); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_mean_tensor_intarrayref_bool_scalartype)(void* self, void* dim, void* keepdim, void* dtype);
HOST_API void* lantern_mean_tensor_intarrayref_bool_scalartype(void* self, void* dim, void* keepdim, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_mean_tensor_intarrayref_bool_scalartype(self, dim, keepdim, dtype); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_mean_tensor_intarrayref_bool_scalartype)(void* self, void* dim, void* keepdim, void* dtype);
@@ -4412,6 +4518,16 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_Tensor_mm_tensor_tensor(void* self, void* mat2) { void* ret = _lantern_Tensor_mm_tensor_tensor(self, mat2); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_mm_out_tensor_tensor_tensor)(void* out, void* self, void* mat2);
HOST_API void* lantern_mm_out_tensor_tensor_tensor(void* out, void* self, void* mat2) { LANTERN_CHECK_LOADED void* ret = _lantern_mm_out_tensor_tensor_tensor(out, self, mat2); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__int_mm_tensor_tensor)(void* self, void* mat2);
+ HOST_API void* lantern__int_mm_tensor_tensor(void* self, void* mat2) { LANTERN_CHECK_LOADED void* ret = _lantern__int_mm_tensor_tensor(self, mat2); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__int_mm_out_tensor_tensor_tensor)(void* out, void* self, void* mat2);
+ HOST_API void* lantern__int_mm_out_tensor_tensor_tensor(void* out, void* self, void* mat2) { LANTERN_CHECK_LOADED void* ret = _lantern__int_mm_out_tensor_tensor_tensor(out, self, mat2); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__convert_weight_to_int4pack_tensor_intt)(void* self, void* innerKTiles);
+ HOST_API void* lantern__convert_weight_to_int4pack_tensor_intt(void* self, void* innerKTiles) { LANTERN_CHECK_LOADED void* ret = _lantern__convert_weight_to_int4pack_tensor_intt(self, innerKTiles); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__weight_int4pack_mm_tensor_tensor_intt_tensor)(void* self, void* mat2, void* qGroupSize, void* qScaleAndZeros);
+ HOST_API void* lantern__weight_int4pack_mm_tensor_tensor_intt_tensor(void* self, void* mat2, void* qGroupSize, void* qScaleAndZeros) { LANTERN_CHECK_LOADED void* ret = _lantern__weight_int4pack_mm_tensor_tensor_intt_tensor(self, mat2, qGroupSize, qScaleAndZeros); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__weight_int8pack_mm_tensor_tensor_tensor)(void* self, void* mat2, void* scales);
+ HOST_API void* lantern__weight_int8pack_mm_tensor_tensor_tensor(void* self, void* mat2, void* scales) { LANTERN_CHECK_LOADED void* ret = _lantern__weight_int8pack_mm_tensor_tensor_tensor(self, mat2, scales); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__sparse_mm_tensor_tensor)(void* sparse, void* dense);
HOST_API void* lantern__sparse_mm_tensor_tensor(void* sparse, void* dense) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_mm_tensor_tensor(sparse, dense); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__sparse_mm_tensor_tensor_cstringview)(void* sparse, void* dense, void* reduce);
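The _int_mm and int4pack/int8pack entries added in the hunk above bind ATen's integer and weight-only-quantized matrix products: _int_mm multiplies two int8 matrices accumulating into int32, while _convert_weight_to_int4pack plus _weight_int4pack_mm implement int4 weight-only GEMM with group-wise scales. A hedged sketch of the _int_mm semantics; it is a private operator, so treat the exact call as version-dependent:

#include <ATen/ATen.h>

// (m, k) int8 x (k, n) int8 -> (m, n) int32.
at::Tensor int8_matmul(const at::Tensor& a8, const at::Tensor& b8) {
  TORCH_CHECK(a8.scalar_type() == at::kChar && b8.scalar_type() == at::kChar,
              "expected int8 inputs");
  return at::_int_mm(a8, b8);
}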
input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* momentum, void* eps); HOST_API void* lantern__native_batch_norm_legit_tensor_tensor_tensor_tensor_tensor_bool_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__native_batch_norm_legit_tensor_tensor_tensor_tensor_tensor_bool_double_double(input, weight, bias, running_mean, running_var, training, momentum, eps); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__native_batch_norm_legit_no_training_tensor_tensor_tensor_tensor_tensor_double_double)(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps); + HOST_API void* lantern__native_batch_norm_legit_no_training_tensor_tensor_tensor_tensor_tensor_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__native_batch_norm_legit_no_training_tensor_tensor_tensor_tensor_tensor_double_double(input, weight, bias, running_mean, running_var, momentum, eps); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__native_batch_norm_legit_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double)(void* out, void* save_mean, void* save_invstd, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* momentum, void* eps); HOST_API void* lantern__native_batch_norm_legit_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double(void* out, void* save_mean, void* save_invstd, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__native_batch_norm_legit_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double(out, save_mean, save_invstd, input, weight, bias, running_mean, running_var, training, momentum, eps); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__native_batch_norm_legit_tensor_tensor_tensor_bool_double_double)(void* input, void* weight, void* bias, void* training, void* momentum, void* eps); @@ -4512,8 +4630,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_native_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool(void* grad_out, void* input, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_invstd, void* train, void* eps, void* output_mask) { LANTERN_CHECK_LOADED void* ret = _lantern_native_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_backward_reduce_tensor_tensor_tensor_tensor_tensor_bool_bool_bool)(void* grad_out, void* input, void* mean, void* invstd, void* weight, void* input_g, void* weight_g, void* bias_g); HOST_API void* lantern_batch_norm_backward_reduce_tensor_tensor_tensor_tensor_tensor_bool_bool_bool(void* grad_out, void* input, void* mean, void* invstd, void* weight, void* input_g, void* weight_g, void* bias_g) { LANTERN_CHECK_LOADED void* ret = 
_lantern_batch_norm_backward_reduce_tensor_tensor_tensor_tensor_tensor_bool_bool_bool(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor)(void* grad_out, void* input, void* mean, void* invstd, void* weight, void* mean_dy, void* mean_dy_xmu, void* count); - HOST_API void* lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* grad_out, void* input, void* mean, void* invstd, void* weight, void* mean_dy, void* mean_dy_xmu, void* count) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor)(void* grad_out, void* input, void* mean, void* invstd, void* weight, void* sum_dy, void* sum_dy_xmu, void* count); + HOST_API void* lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* grad_out, void* input, void* mean, void* invstd, void* weight, void* sum_dy, void* sum_dy_xmu, void* count) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_update_stats_tensor_tensor_tensor_double)(void* input, void* running_mean, void* running_var, void* momentum); HOST_API void* lantern_batch_norm_update_stats_tensor_tensor_tensor_double(void* input, void* running_mean, void* running_var, void* momentum) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_update_stats_tensor_tensor_tensor_double(input, running_mean, running_var, momentum); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_is_vulkan_available)(); @@ -4932,6 +5050,10 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_Tensor_slice_tensor_intt_intt_intt_intt(void* self, void* dim, void* start, void* end, void* step) { void* ret = _lantern_Tensor_slice_tensor_intt_intt_intt_intt(self, dim, start, end, step); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_slice_backward_tensor_intarrayref_intt_intt_intt_intt)(void* grad_output, void* input_sizes, void* dim, void* start, void* end, void* step); HOST_API void* lantern_slice_backward_tensor_intarrayref_intt_intt_intt_intt(void* grad_output, void* input_sizes, void* dim, void* start, void* end, void* step) { LANTERN_CHECK_LOADED void* ret = _lantern_slice_backward_tensor_intarrayref_intt_intt_intt_intt(grad_output, input_sizes, dim, start, end, step); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_slice_inverse_tensor_tensor_intt_intt_intt_intt)(void* self, void* src, void* dim, void* start, void* end, void* step); + HOST_API void* lantern_slice_inverse_tensor_tensor_intt_intt_intt_intt(void* self, void* src, void* dim, void* start, void* end, void* step) { LANTERN_CHECK_LOADED void* ret = _lantern_slice_inverse_tensor_tensor_intt_intt_intt_intt(self, src, dim, start, end, step); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR 
+HOST_API void* lantern_Tensor_slice_inverse_tensor_tensor_intt_intt_intt_intt(void* self, void* src, void* dim, void* start, void* end, void* step) { void* ret = _lantern_Tensor_slice_inverse_tensor_tensor_intt_intt_intt_intt(self, src, dim, start, end, step); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_slice_scatter_tensor_tensor_intt_intt_intt_intt)(void* self, void* src, void* dim, void* start, void* end, void* step);
 HOST_API void* lantern_slice_scatter_tensor_tensor_intt_intt_intt_intt(void* self, void* src, void* dim, void* start, void* end, void* step) { LANTERN_CHECK_LOADED void* ret = _lantern_slice_scatter_tensor_tensor_intt_intt_intt_intt(self, src, dim, start, end, step); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_slice_scatter_tensor_tensor_intt_intt_intt_intt)(void* self, void* src, void* dim, void* start, void* end, void* step);
@@ -5044,6 +5166,10 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_Tensor_sspaddmm_tensor_tensor_tensor_scalar_scalar(void* self, void* mat1, void* mat2, void* beta, void* alpha) { void* ret = _lantern_Tensor_sspaddmm_tensor_tensor_tensor_scalar_scalar(self, mat1, mat2, beta, alpha); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_sspaddmm_out_tensor_tensor_tensor_tensor_scalar_scalar)(void* out, void* self, void* mat1, void* mat2, void* beta, void* alpha);
 HOST_API void* lantern_sspaddmm_out_tensor_tensor_tensor_tensor_scalar_scalar(void* out, void* self, void* mat1, void* mat2, void* beta, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern_sspaddmm_out_tensor_tensor_tensor_tensor_scalar_scalar(out, self, mat1, mat2, beta, alpha); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__chunk_cat_tensorlist_intt_intt)(void* tensors, void* dim, void* num_chunks);
+HOST_API void* lantern__chunk_cat_tensorlist_intt_intt(void* tensors, void* dim, void* num_chunks) { LANTERN_CHECK_LOADED void* ret = _lantern__chunk_cat_tensorlist_intt_intt(tensors, dim, num_chunks); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__chunk_cat_out_tensor_tensorlist_intt_intt)(void* out, void* tensors, void* dim, void* num_chunks);
+HOST_API void* lantern__chunk_cat_out_tensor_tensorlist_intt_intt(void* out, void* tensors, void* dim, void* num_chunks) { LANTERN_CHECK_LOADED void* ret = _lantern__chunk_cat_out_tensor_tensorlist_intt_intt(out, tensors, dim, num_chunks); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_stack_tensorlist_intt)(void* tensors, void* dim);
 HOST_API void* lantern_stack_tensorlist_intt(void* tensors, void* dim) { LANTERN_CHECK_LOADED void* ret = _lantern_stack_tensorlist_intt(tensors, dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_stack_out_tensor_tensorlist_intt)(void* out, void* tensors, void* dim);
@@ -5138,36 +5264,36 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_std_tensor_intarrayref_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_tensor_intarrayref_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_std_tensor_intarrayref_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_Tensor_std_tensor_intarrayref_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { void* ret = _lantern_Tensor_std_tensor_intarrayref_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_std_tensor_intarrayref_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_std_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_tensor_intarrayref_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_Tensor_std_tensor_intarrayref_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_Tensor_std_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_std_tensor_intarrayref_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_std_tensor_intarrayref_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_std_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_tensor_intarrayref_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor_std_tensor_intarrayref_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_Tensor_std_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_std_tensor_intarrayref_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_std_mean_tensor_bool)(void* self, void* unbiased);
 HOST_API void* lantern_std_mean_tensor_bool(void* self, void* unbiased) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_tensor_bool(self, unbiased); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_std_mean_tensor_intarrayref_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_std_mean_tensor_intarrayref_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_tensor_intarrayref_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_std_mean_tensor_intarrayref_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_std_mean_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_tensor_intarrayref_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_std_mean_tensor_intarrayref_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_std_mean_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_tensor_intarrayref_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_std_mean_tensor_dimnamelist_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_std_mean_tensor_dimnamelist_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_tensor_dimnamelist_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_std_mean_tensor_dimnamelist_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_std_mean_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_tensor_dimnamelist_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_std_mean_tensor_dimnamelist_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_std_mean_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_tensor_dimnamelist_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_std_out_tensor_tensor_intarrayref_bool_bool)(void* out, void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_std_out_tensor_tensor_intarrayref_bool_bool(void* out, void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_out_tensor_tensor_intarrayref_bool_bool(out, self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_std_out_tensor_tensor_intarrayref_intt_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_std_out_tensor_tensor_intarrayref_intt_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_out_tensor_tensor_intarrayref_intt_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_std_out_tensor_tensor_intarrayref_scalar_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_std_out_tensor_tensor_intarrayref_scalar_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_out_tensor_tensor_intarrayref_scalar_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_std_tensor_dimnamelist_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_std_tensor_dimnamelist_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_tensor_dimnamelist_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_std_tensor_dimnamelist_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_Tensor_std_tensor_dimnamelist_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { void* ret = _lantern_Tensor_std_tensor_dimnamelist_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_std_out_tensor_tensor_dimnamelist_bool_bool)(void* out, void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_std_out_tensor_tensor_dimnamelist_bool_bool(void* out, void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_out_tensor_tensor_dimnamelist_bool_bool(out, self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_std_tensor_dimnamelist_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_std_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_tensor_dimnamelist_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_Tensor_std_tensor_dimnamelist_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_Tensor_std_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_std_tensor_dimnamelist_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_std_out_tensor_tensor_dimnamelist_intt_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_std_out_tensor_tensor_dimnamelist_intt_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_out_tensor_tensor_dimnamelist_intt_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_std_tensor_dimnamelist_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_std_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_tensor_dimnamelist_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor_std_tensor_dimnamelist_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_Tensor_std_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_std_tensor_dimnamelist_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_std_out_tensor_tensor_dimnamelist_scalar_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_std_out_tensor_tensor_dimnamelist_scalar_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_out_tensor_tensor_dimnamelist_scalar_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_prod_tensor_scalartype)(void* self, void* dtype);
 HOST_API void* lantern_prod_tensor_scalartype(void* self, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_prod_tensor_scalartype(self, dtype); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_prod_tensor_scalartype)(void* self, void* dtype);
@@ -5284,14 +5410,36 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_Tensor__nested_tensor_size_tensor(void* self) { void* ret = _lantern_Tensor__nested_tensor_size_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor__nested_tensor_strides_tensor)(void* self);
 HOST_API void* lantern_Tensor__nested_tensor_strides_tensor(void* self) { void* ret = _lantern_Tensor__nested_tensor_strides_tensor(self); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_Tensor__nested_tensor_offsets_tensor)(void* self);
-HOST_API void* lantern_Tensor__nested_tensor_offsets_tensor(void* self) { void* ret = _lantern_Tensor__nested_tensor_offsets_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__nested_tensor_storage_offsets_tensor)(void* self);
+HOST_API void* lantern_Tensor__nested_tensor_storage_offsets_tensor(void* self) { void* ret = _lantern_Tensor__nested_tensor_storage_offsets_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__nested_from_padded_and_nested_example_tensor_tensor)(void* padded, void* nt_example);
 HOST_API void* lantern__nested_from_padded_and_nested_example_tensor_tensor(void* padded, void* nt_example) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_from_padded_and_nested_example_tensor_tensor(padded, nt_example); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_buffer_tensor_tensor_tensor_intarrayref)(void* self, void* nested_size, void* nested_strides, void* offsets);
-HOST_API void* lantern__nested_view_from_buffer_tensor_tensor_tensor_intarrayref(void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_buffer_tensor_tensor_tensor_intarrayref(self, nested_size, nested_strides, offsets); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_intarrayref)(void* self, void* nested_size, void* nested_strides, void* offsets);
-HOST_API void* lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_intarrayref(void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_intarrayref(self, nested_size, nested_strides, offsets); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_buffer_tensor_tensor_tensor_tensor)(void* self, void* nested_size, void* nested_strides, void* offsets);
+HOST_API void* lantern__nested_view_from_buffer_tensor_tensor_tensor_tensor(void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_buffer_tensor_tensor_tensor_tensor(self, nested_size, nested_strides, offsets); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_tensor)(void* self, void* nested_size, void* nested_strides, void* offsets);
+HOST_API void* lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_tensor(void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_tensor(self, nested_size, nested_strides, offsets); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_jagged_tensor_tensor_tensor_tensor_intt_tensor_tensor)(void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen);
+HOST_API void* lantern__nested_view_from_jagged_tensor_tensor_tensor_tensor_intt_tensor_tensor(void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_jagged_tensor_tensor_tensor_tensor_intt_tensor_tensor(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_jagged_copy_tensor_tensor_tensor_tensor_intt_tensor_tensor)(void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen);
+HOST_API void* lantern__nested_view_from_jagged_copy_tensor_tensor_tensor_tensor_intt_tensor_tensor(void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_jagged_copy_tensor_tensor_tensor_tensor_intt_tensor_tensor(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_values_tensor)(void* self);
+HOST_API void* lantern__nested_get_values_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_values_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_values_copy_tensor)(void* self);
+HOST_API void* lantern__nested_get_values_copy_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_values_copy_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_offsets_tensor)(void* self);
+HOST_API void* lantern__nested_get_offsets_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_offsets_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_lengths_tensor)(void* self);
+HOST_API void* lantern__nested_get_lengths_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_lengths_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_ragged_idx_tensor)(void* self);
+HOST_API void* lantern__nested_get_ragged_idx_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_ragged_idx_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_min_seqlen_tensor)(void* self);
+HOST_API void* lantern__nested_get_min_seqlen_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_min_seqlen_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_max_seqlen_tensor)(void* self);
+HOST_API void* lantern__nested_get_max_seqlen_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_max_seqlen_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_jagged_dummy_tensor)(void* any);
+HOST_API void* lantern__nested_get_jagged_dummy_tensor(void* any) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_jagged_dummy_tensor(any); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_compute_contiguous_strides_offsets_tensor)(void* nested_size);
+HOST_API void* lantern__nested_compute_contiguous_strides_offsets_tensor(void* nested_size) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_compute_contiguous_strides_offsets_tensor(nested_size); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__trilinear_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt)(void* i1, void* i2, void* i3, void* expand1, void* expand2, void* expand3, void* sumdim, void* unroll_dim);
 HOST_API void* lantern__trilinear_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt(void* i1, void* i2, void* i3, void* expand1, void* expand2, void* expand3, void* sumdim, void* unroll_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__trilinear_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_triplet_margin_loss_tensor_tensor_tensor_double_double_double_bool_intt)(void* anchor, void* positive, void* negative, void* margin, void* p, void* eps, void* swap, void* reduction);
@@ -5348,36 +5496,36 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_var_tensor_intarrayref_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_tensor_intarrayref_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_var_tensor_intarrayref_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_Tensor_var_tensor_intarrayref_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { void* ret = _lantern_Tensor_var_tensor_intarrayref_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_var_tensor_intarrayref_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_var_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_tensor_intarrayref_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_Tensor_var_tensor_intarrayref_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_Tensor_var_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_var_tensor_intarrayref_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_var_tensor_intarrayref_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_var_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_tensor_intarrayref_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor_var_tensor_intarrayref_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_Tensor_var_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_var_tensor_intarrayref_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_var_out_tensor_tensor_intarrayref_bool_bool)(void* out, void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_var_out_tensor_tensor_intarrayref_bool_bool(void* out, void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_out_tensor_tensor_intarrayref_bool_bool(out, self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_var_out_tensor_tensor_intarrayref_intt_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_var_out_tensor_tensor_intarrayref_intt_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_out_tensor_tensor_intarrayref_intt_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_var_out_tensor_tensor_intarrayref_scalar_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_var_out_tensor_tensor_intarrayref_scalar_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_out_tensor_tensor_intarrayref_scalar_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_var_tensor_dimnamelist_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_var_tensor_dimnamelist_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_tensor_dimnamelist_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_var_tensor_dimnamelist_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_Tensor_var_tensor_dimnamelist_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { void* ret = _lantern_Tensor_var_tensor_dimnamelist_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_var_out_tensor_tensor_dimnamelist_bool_bool)(void* out, void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_var_out_tensor_tensor_dimnamelist_bool_bool(void* out, void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_out_tensor_tensor_dimnamelist_bool_bool(out, self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_var_tensor_dimnamelist_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_var_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_tensor_dimnamelist_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_Tensor_var_tensor_dimnamelist_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_Tensor_var_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_var_tensor_dimnamelist_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_var_out_tensor_tensor_dimnamelist_intt_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_var_out_tensor_tensor_dimnamelist_intt_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_out_tensor_tensor_dimnamelist_intt_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_var_tensor_dimnamelist_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_var_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_tensor_dimnamelist_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor_var_tensor_dimnamelist_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_Tensor_var_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_var_tensor_dimnamelist_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_var_out_tensor_tensor_dimnamelist_scalar_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_var_out_tensor_tensor_dimnamelist_scalar_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_out_tensor_tensor_dimnamelist_scalar_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_var_mean_tensor_bool)(void* self, void* unbiased);
 HOST_API void* lantern_var_mean_tensor_bool(void* self, void* unbiased) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_tensor_bool(self, unbiased); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_var_mean_tensor_intarrayref_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_var_mean_tensor_intarrayref_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_tensor_intarrayref_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_var_mean_tensor_intarrayref_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_var_mean_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_tensor_intarrayref_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_var_mean_tensor_intarrayref_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_var_mean_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_tensor_intarrayref_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_var_mean_tensor_dimnamelist_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_var_mean_tensor_dimnamelist_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_tensor_dimnamelist_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_var_mean_tensor_dimnamelist_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_var_mean_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_tensor_dimnamelist_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_var_mean_tensor_dimnamelist_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_var_mean_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_tensor_dimnamelist_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_view_as_tensor_tensor)(void* self, void* other);
 HOST_API void* lantern_Tensor_view_as_tensor_tensor(void* self, void* other) { void* ret = _lantern_Tensor_view_as_tensor_tensor(self, other); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_where_tensor_tensor_tensor)(void* condition, void* self, void* other);
@@ -5432,6 +5580,14 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_native_norm_tensor_scalar(void* self, void* p) { LANTERN_CHECK_LOADED void* ret = _lantern_native_norm_tensor_scalar(self, p); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_native_norm_tensor_scalar_intarrayref_bool_scalartype)(void* self, void* p, void* dim, void* keepdim, void* dtype);
 HOST_API void* lantern_native_norm_tensor_scalar_intarrayref_bool_scalartype(void* self, void* p, void* dim, void* keepdim, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_native_norm_tensor_scalar_intarrayref_bool_scalartype(self, p, dim, keepdim, dtype); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__batch_norm_with_update_tensor_tensor_tensor_tensor_tensor_double_double)(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps);
+HOST_API void* lantern__batch_norm_with_update_tensor_tensor_tensor_tensor_tensor_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__batch_norm_with_update_tensor_tensor_tensor_tensor_tensor_double_double(input, weight, bias, running_mean, running_var, momentum, eps); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__batch_norm_with_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double)(void* out, void* save_mean, void* save_invstd, void* reserve, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps);
+HOST_API void* lantern__batch_norm_with_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(void* out, void* save_mean, void* save_invstd, void* reserve, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__batch_norm_with_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(out, save_mean, save_invstd, reserve, input, weight, bias, running_mean, running_var, momentum, eps); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__batch_norm_no_update_tensor_tensor_tensor_tensor_tensor_double_double)(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps);
+HOST_API void* lantern__batch_norm_no_update_tensor_tensor_tensor_tensor_tensor_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__batch_norm_no_update_tensor_tensor_tensor_tensor_tensor_double_double(input, weight, bias, running_mean, running_var, momentum, eps); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool_tensor)(void* grad_out, void* input, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_var, void* update, void* eps, void* output_mask, void* reserve);
+HOST_API void* lantern_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool_tensor(void* grad_out, void* input, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_var, void* update, void* eps, void* output_mask, void* reserve) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool_tensor(grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__sparse_sum_tensor)(void* self);
 HOST_API void* lantern__sparse_sum_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_sum_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__sparse_sum_tensor_scalartype)(void* self, void* dtype);
@@ -5598,6 +5754,12 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool(void* self, void* mat1, void* mat2, void* beta, void* alpha, void* use_gelu) { LANTERN_CHECK_LOADED void* ret = _lantern__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool(self, mat1, mat2, beta, alpha, use_gelu); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool)(void* self, void* mat1, void* mat2, void* beta, void* alpha, void* use_gelu);
 HOST_API void* lantern_Tensor__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool(void* self, void* mat1, void* mat2, void* beta, void* alpha, void* use_gelu) { void* ret = _lantern_Tensor__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool(self, mat1, mat2, beta, alpha, use_gelu); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__scaled_mm_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool)(void* self, void* mat2, void* scale_a, void* scale_b, void* bias, void* scale_result, void* out_dtype, void* use_fast_accum);
+HOST_API void* lantern__scaled_mm_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool(void* self, void* mat2, void* scale_a, void* scale_b, void* bias, void* scale_result, void* out_dtype, void* use_fast_accum) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_mm_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool(self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__scaled_mm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool)(void* out, void* self, void* mat2, void* scale_a, void* scale_b, void* bias, void* scale_result, void* out_dtype, void* use_fast_accum);
+HOST_API void* lantern__scaled_mm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool(void* out, void* self, void* mat2, void* scale_a, void* scale_b, void* bias, void* scale_result, void* out_dtype, void* use_fast_accum) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_mm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool(out, self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__sparse_compressed_tensor_with_dims_intt_intt_intarrayref_intarrayref_scalartype_tensoroptions)(void* nnz, void* dense_dim, void* size, void* blocksize, void* index_dtype, void* options);
+HOST_API void* lantern__sparse_compressed_tensor_with_dims_intt_intt_intarrayref_intarrayref_scalartype_tensoroptions(void* nnz, void* dense_dim, void* size, void* blocksize, void* index_dtype, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_compressed_tensor_with_dims_intt_intt_intarrayref_intarrayref_scalartype_tensoroptions(nnz, dense_dim, size, blocksize, index_dtype, options); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_sparse_compressed_tensor_tensor_tensor_tensor_intarrayref_tensoroptions)(void* compressed_indices, void* plain_indices, void* values, void* size, void* options);
 HOST_API void* lantern_sparse_compressed_tensor_tensor_tensor_tensor_intarrayref_tensoroptions(void* compressed_indices, void* plain_indices, void* values, void* size, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_compressed_tensor_tensor_tensor_tensor_intarrayref_tensoroptions(compressed_indices, plain_indices, values, size, options); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_sparse_csr_tensor_tensor_tensor_tensor_intarrayref_tensoroptions)(void* crow_indices, void* col_indices, void* values, void* size, void* options);
@@ -5630,14 +5792,14 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern__sparse_bsc_tensor_unsafe_tensor_tensor_tensor_intarrayref_tensoroptions(void* ccol_indices, void* row_indices, void* values, void* size, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_bsc_tensor_unsafe_tensor_tensor_tensor_intarrayref_tensoroptions(ccol_indices, row_indices, values, size, options); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_sparse_coo_tensor_intarrayref_tensoroptions)(void* size, void* options);
 HOST_API void* lantern_sparse_coo_tensor_intarrayref_tensoroptions(void* size, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_coo_tensor_intarrayref_tensoroptions(size, options); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_sparse_coo_tensor_tensor_tensor_tensoroptions)(void* indices, void* values, void* options);
-HOST_API void* lantern_sparse_coo_tensor_tensor_tensor_tensoroptions(void* indices, void* values, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_coo_tensor_tensor_tensor_tensoroptions(indices, values, options); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions)(void* indices, void* values, void* size, void* options);
-HOST_API void* lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions(void* indices, void* values, void* size, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions(indices, values, size, options); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions)(void* indices, void* values, void* size, void* options);
-HOST_API void* lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions(void* indices, void* values, void* size, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions(indices, values, size, options); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref)(void* indices, void* values, void* size);
-HOST_API void* lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref(void* indices, void* values, void* size) { LANTERN_CHECK_LOADED void* ret = _lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref(indices, values, size); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_sparse_coo_tensor_tensor_tensor_tensoroptions_bool)(void* indices, void* values, void* options, void* is_coalesced);
+HOST_API void* lantern_sparse_coo_tensor_tensor_tensor_tensoroptions_bool(void* indices, void* values, void* options, void* is_coalesced) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_coo_tensor_tensor_tensor_tensoroptions_bool(indices, values, options, is_coalesced); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions_bool)(void* indices, void* values, void* size, void* options, void* is_coalesced);
+HOST_API void* lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions_bool(void* indices, void* values, void* size, void* options, void* is_coalesced) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions_bool(indices, values, size, options, is_coalesced); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions_bool)(void* indices, void* values, void* size, void* options, void* is_coalesced);
+HOST_API void* lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions_bool(void* indices, void* values, void* size, void* options, void* is_coalesced) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions_bool(indices, values, size, options, is_coalesced); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref_bool)(void* indices, void* values, void* size, void* is_coalesced);
+HOST_API void* lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref_bool(void* indices, void* values, void* size, void* is_coalesced) { LANTERN_CHECK_LOADED void* ret = _lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref_bool(indices, values, size, is_coalesced); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__validate_sparse_compressed_tensor_args_tensor_tensor_tensor_intarrayref_layout)(void* compressed_indices, void* plain_indices, void* values, void* size, void* layout);
 HOST_API void* lantern__validate_sparse_compressed_tensor_args_tensor_tensor_tensor_intarrayref_layout(void* compressed_indices, void* plain_indices, void* values, void* size, void* layout) { LANTERN_CHECK_LOADED void* ret = _lantern__validate_sparse_compressed_tensor_args_tensor_tensor_tensor_intarrayref_layout(compressed_indices, plain_indices, values, size, layout); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__validate_sparse_csr_tensor_args_tensor_tensor_tensor_intarrayref)(void* crow_indices, void* col_indices, void* values, void* size);
@@ -5650,22 +5812,24 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern__validate_sparse_bsc_tensor_args_tensor_tensor_tensor_intarrayref(void* ccol_indices, void* row_indices, void* values, void* size) { LANTERN_CHECK_LOADED void* ret = _lantern__validate_sparse_bsc_tensor_args_tensor_tensor_tensor_intarrayref(ccol_indices, row_indices, values, size); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_with_dims_intt_intt_intarrayref_tensoroptions)(void* sparse_dim, void* dense_dim, void* size, void* options);
 HOST_API void* lantern__sparse_coo_tensor_with_dims_intt_intt_intarrayref_tensoroptions(void* sparse_dim, void* dense_dim, void* size, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_with_dims_intt_intt_intarrayref_tensoroptions(sparse_dim, dense_dim, size, options); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions)(void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* options);
-HOST_API void* lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions(void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions(sparse_dim, dense_dim, size, indices, values, options); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions_bool)(void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* options, void* is_coalesced);
+HOST_API void* lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions_bool(void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* options, void* is_coalesced) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions_bool(sparse_dim, dense_dim, size, indices, values, options, is_coalesced); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_sparse_resize__tensor_intarrayref_intt_intt)(void* self, void* size, void* sparse_dim, void* dense_dim);
 HOST_API void* lantern_Tensor_sparse_resize__tensor_intarrayref_intt_intt(void* self, void* size, void* sparse_dim, void* dense_dim) { void* ret = _lantern_Tensor_sparse_resize__tensor_intarrayref_intt_intt(self, size, sparse_dim, dense_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_sparse_resize_and_clear__tensor_intarrayref_intt_intt)(void* self, void* size, void* sparse_dim, void* dense_dim);
 HOST_API void* lantern_Tensor_sparse_resize_and_clear__tensor_intarrayref_intt_intt(void* self, void* size, void* sparse_dim, void* dense_dim) { void* ret = _lantern_Tensor_sparse_resize_and_clear__tensor_intarrayref_intt_intt(self, size, sparse_dim, dense_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_sparse_mask_tensor_tensor)(void* self, void* mask);
 HOST_API void* lantern_Tensor_sparse_mask_tensor_tensor(void* self, void* mask) { void* ret = _lantern_Tensor_sparse_mask_tensor_tensor(self, mask); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__sparse_mask_projection_tensor_tensor_bool)(void* self, void* mask, void* accumulate_matches);
+HOST_API void* lantern_Tensor__sparse_mask_projection_tensor_tensor_bool(void* self, void* mask, void* accumulate_matches) { void* ret = _lantern_Tensor__sparse_mask_projection_tensor_tensor_bool(self, mask, accumulate_matches); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__to_cpu_tensorlist)(void* tensors);
 HOST_API void* lantern__to_cpu_tensorlist(void* tensors) { LANTERN_CHECK_LOADED void* ret = _lantern__to_cpu_tensorlist(tensors); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_dense_tensor_scalartype)(void* self, void* dtype);
-HOST_API void* lantern_Tensor_to_dense_tensor_scalartype(void* self, void* dtype) { void* ret = _lantern_Tensor_to_dense_tensor_scalartype(self, dtype); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_dense_tensor_scalartype)(void* self, void* dtype);
-HOST_API void* lantern_Tensor__to_dense_tensor_scalartype(void* self, void* dtype) { void* ret = _lantern_Tensor__to_dense_tensor_scalartype(self, dtype); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_to_dense_backward_tensor_tensor)(void* grad, void* input);
-HOST_API void* lantern_to_dense_backward_tensor_tensor(void* grad, void* input) { LANTERN_CHECK_LOADED void* ret = _lantern_to_dense_backward_tensor_tensor(grad, input); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_dense_tensor_scalartype_bool)(void* self, void* dtype, void* masked_grad);
+HOST_API void* lantern_Tensor_to_dense_tensor_scalartype_bool(void* self, void* dtype, void* masked_grad) { void* ret = _lantern_Tensor_to_dense_tensor_scalartype_bool(self, dtype, masked_grad); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_dense_tensor_scalartype_bool)(void* self, void* dtype, void* masked_grad);
+HOST_API void* lantern_Tensor__to_dense_tensor_scalartype_bool(void* self, void* dtype, void* masked_grad) { void* ret = _lantern_Tensor__to_dense_tensor_scalartype_bool(self, dtype, masked_grad); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_to_dense_backward_tensor_tensor_bool)(void* grad, void* input, void* masked_grad);
+HOST_API void* lantern_to_dense_backward_tensor_tensor_bool(void* grad, void* input, void* masked_grad) { LANTERN_CHECK_LOADED void* ret = _lantern_to_dense_backward_tensor_tensor_bool(grad, input, masked_grad); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_sparse_dim_tensor)(void* self);
 HOST_API void* lantern_Tensor_sparse_dim_tensor(void* self) { void* ret = _lantern_Tensor_sparse_dim_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor__dimi_tensor)(void* self);
@@ -5716,22 +5880,36 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_Tensor_unbind_tensor_dimname(void* self, void* dim) { void* ret = _lantern_Tensor_unbind_tensor_dimname(self, dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_sparse_tensor_intt)(void* self, void* sparse_dim);
 HOST_API void* lantern_Tensor_to_sparse_tensor_intt(void* self, void* sparse_dim) { void* ret = _lantern_Tensor_to_sparse_tensor_intt(self, sparse_dim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_sparse_tensor_intt)(void* self, void* sparse_dim);
+HOST_API void* lantern_Tensor__to_sparse_tensor_intt(void* self, void* sparse_dim) { void* ret = _lantern_Tensor__to_sparse_tensor_intt(self, sparse_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_sparse_tensor_layout_intarrayref_intt)(void* self, void* layout, void* blocksize, void* dense_dim);
 HOST_API void* lantern_Tensor_to_sparse_tensor_layout_intarrayref_intt(void* self, void* layout, void* blocksize, void* dense_dim) { void* ret = _lantern_Tensor_to_sparse_tensor_layout_intarrayref_intt(self, layout, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_sparse_tensor_layout_intarrayref_intt)(void* self, void* layout, void* blocksize, void* dense_dim);
+HOST_API void* lantern_Tensor__to_sparse_tensor_layout_intarrayref_intt(void* self, void* layout, void* blocksize, void* dense_dim) { void* ret = _lantern_Tensor__to_sparse_tensor_layout_intarrayref_intt(self, layout, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_sparse_csr_tensor_intt)(void* self, void* dense_dim);
 HOST_API void* lantern_Tensor_to_sparse_csr_tensor_intt(void* self, void* dense_dim) { void* ret = _lantern_Tensor_to_sparse_csr_tensor_intt(self, dense_dim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_sparse_csr_tensor_intt)(void* self, void* dense_dim);
+HOST_API void* lantern_Tensor__to_sparse_csr_tensor_intt(void* self, void* dense_dim) { void* ret = _lantern_Tensor__to_sparse_csr_tensor_intt(self, dense_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_sparse_csc_tensor_intt)(void* self, void* dense_dim);
 HOST_API void* lantern_Tensor_to_sparse_csc_tensor_intt(void* self, void* dense_dim) { void* ret = _lantern_Tensor_to_sparse_csc_tensor_intt(self, dense_dim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_sparse_csc_tensor_intt)(void* self, void* dense_dim);
+HOST_API void* lantern_Tensor__to_sparse_csc_tensor_intt(void* self, void* dense_dim) { void* ret = _lantern_Tensor__to_sparse_csc_tensor_intt(self, dense_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_sparse_bsr_tensor_intarrayref_intt)(void* self, void* blocksize, void* dense_dim);
 HOST_API void* lantern_Tensor_to_sparse_bsr_tensor_intarrayref_intt(void* self, void* blocksize, void* dense_dim) { void* ret = _lantern_Tensor_to_sparse_bsr_tensor_intarrayref_intt(self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_sparse_bsr_tensor_intarrayref_intt)(void* self, void* blocksize, void* dense_dim);
+HOST_API void* lantern_Tensor__to_sparse_bsr_tensor_intarrayref_intt(void* self, void* blocksize, void* dense_dim) { void* ret = _lantern_Tensor__to_sparse_bsr_tensor_intarrayref_intt(self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_sparse_bsc_tensor_intarrayref_intt)(void* self, void* blocksize, void* dense_dim);
 HOST_API void* lantern_Tensor_to_sparse_bsc_tensor_intarrayref_intt(void* self, void* blocksize, void* dense_dim) { void* ret = _lantern_Tensor_to_sparse_bsc_tensor_intarrayref_intt(self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_sparse_bsc_tensor_intarrayref_intt)(void* self, void* blocksize, void* dense_dim);
+HOST_API void* lantern_Tensor__to_sparse_bsc_tensor_intarrayref_intt(void* self, void* blocksize, void* dense_dim) { void* ret = _lantern_Tensor__to_sparse_bsc_tensor_intarrayref_intt(self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__to_sparse_semi_structured_tensor)(void* dense);
+HOST_API void* lantern__to_sparse_semi_structured_tensor(void* dense) { LANTERN_CHECK_LOADED void* ret = _lantern__to_sparse_semi_structured_tensor(dense); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_mkldnn_tensor_scalartype)(void* self, void* dtype);
 HOST_API void* lantern_Tensor_to_mkldnn_tensor_scalartype(void* self, void* dtype) { void* ret = _lantern_Tensor_to_mkldnn_tensor_scalartype(self, dtype); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_mkldnn_reorder_conv2d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref)(void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size);
 HOST_API void* lantern_mkldnn_reorder_conv2d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_reorder_conv2d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(self, padding, stride, dilation, groups, input_size); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt)(void* self, void* padding, void* stride, void* dilation, void* groups);
-HOST_API void* lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt(void* self, void* padding, void* stride, void* dilation, void* groups) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt(self, padding, stride, dilation, groups); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref)(void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size);
+HOST_API void* lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(self, padding, stride, dilation, groups, input_size); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_to_mkldnn_backward_tensor_tensor)(void* grad, void* input);
 HOST_API void* lantern_to_mkldnn_backward_tensor_tensor(void* grad, void* input) { LANTERN_CHECK_LOADED void* ret = _lantern_to_mkldnn_backward_tensor_tensor(grad, input); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_quantize_per_tensor_dynamic_tensor_scalartype_bool)(void* self, void* dtype, void* reduce_range);
@@ -5846,8 +6024,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_result_type_scalar_tensor(void* scalar, void* tensor) { LANTERN_CHECK_LOADED void* ret = _lantern_result_type_scalar_tensor(scalar, tensor); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_result_type_scalar_scalar)(void* scalar1, void* scalar2);
 HOST_API void* lantern_result_type_scalar_scalar(void* scalar1, void* scalar2) { LANTERN_CHECK_LOADED void* ret = _lantern_result_type_scalar_scalar(scalar1, scalar2); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_can_cast_scalartype_scalartype)(void* from, void* to);
-HOST_API void* lantern_can_cast_scalartype_scalartype(void* from, void* to) { LANTERN_CHECK_LOADED void* ret = _lantern_can_cast_scalartype_scalartype(from, to); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_can_cast_scalartype_scalartype)(void* from_, void* to);
+HOST_API void* lantern_can_cast_scalartype_scalartype(void* from_, void* to) { LANTERN_CHECK_LOADED void* ret = _lantern_can_cast_scalartype_scalartype(from_, to); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_promote_types_scalartype_scalartype)(void* type1, void* type2);
 HOST_API void* lantern_promote_types_scalartype_scalartype(void* type1, void* type2) { LANTERN_CHECK_LOADED void* ret = _lantern_promote_types_scalartype_scalartype(type1, type2); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__local_scalar_dense_tensor)(void* self);
@@ -5944,6 +6122,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_masked_scatter_tensor_tensor_tensor(void* self, void* mask, void* source) { LANTERN_CHECK_LOADED void* ret = _lantern_masked_scatter_tensor_tensor_tensor(self, mask, source); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_masked_scatter_tensor_tensor_tensor)(void* self, void* mask, void* source);
 HOST_API void* lantern_Tensor_masked_scatter_tensor_tensor_tensor(void* self, void* mask, void* source) { void* ret = _lantern_Tensor_masked_scatter_tensor_tensor_tensor(self, mask, source); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_masked_scatter_backward_tensor_tensor_intarrayref)(void* grad_output, void* mask, void* sizes);
+HOST_API void* lantern_masked_scatter_backward_tensor_tensor_intarrayref(void* grad_output, void* mask, void* sizes) { LANTERN_CHECK_LOADED void* ret = _lantern_masked_scatter_backward_tensor_tensor_intarrayref(grad_output, mask, sizes); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__masked_softmax_tensor_tensor_intt_intt)(void* self, void* mask, void* dim, void* mask_type);
 HOST_API void* lantern__masked_softmax_tensor_tensor_intt_intt(void* self, void* mask, void* dim, void* mask_type) { LANTERN_CHECK_LOADED void* ret = _lantern__masked_softmax_tensor_tensor_intt_intt(self, mask, dim, mask_type); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__masked_softmax_backward_tensor_tensor_tensor_intt)(void* grad_output, void* output, void* mask, void* dim);
@@ -6496,6 +6676,12 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_nonzero_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_nonzero_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_nonzero_tensor)(void* self);
 HOST_API void* lantern_Tensor_nonzero_tensor(void* self) { void* ret = _lantern_Tensor_nonzero_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_nonzero_static_out_tensor_tensor_intt_intt)(void* out, void* self, void* size, void* fill_value);
+HOST_API void* lantern_nonzero_static_out_tensor_tensor_intt_intt(void* out, void* self, void* size, void* fill_value) { LANTERN_CHECK_LOADED void* ret = _lantern_nonzero_static_out_tensor_tensor_intt_intt(out, self, size, fill_value); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_nonzero_static_tensor_intt_intt)(void* self, void* size, void* fill_value);
+HOST_API void* lantern_nonzero_static_tensor_intt_intt(void* self, void* size, void*
fill_value) { LANTERN_CHECK_LOADED void* ret = _lantern_nonzero_static_tensor_intt_intt(self, size, fill_value); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_Tensor_nonzero_static_tensor_intt_intt)(void* self, void* size, void* fill_value); + HOST_API void* lantern_Tensor_nonzero_static_tensor_intt_intt(void* self, void* size, void* fill_value) { void* ret = _lantern_Tensor_nonzero_static_tensor_intt_intt(self, size, fill_value); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_nonzero_numpy_tensor)(void* self); HOST_API void* lantern_nonzero_numpy_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_nonzero_numpy_tensor(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_Tensor_nonzero_numpy_tensor)(void* self); @@ -6814,6 +7000,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_min_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_min_tensor(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_Tensor_min_tensor)(void* self); HOST_API void* lantern_Tensor_min_tensor(void* self) { void* ret = _lantern_Tensor_min_tensor(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_min_out_tensor_tensor)(void* out, void* self); + HOST_API void* lantern_min_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_min_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_fmin_tensor_tensor)(void* self, void* other); HOST_API void* lantern_fmin_tensor_tensor(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern_fmin_tensor_tensor(self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_Tensor_fmin_tensor_tensor)(void* self, void* other); @@ -6918,6 +7106,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_argsort_tensor_bool_intt_bool(void* self, void* stable, void* dim, void* descending) { LANTERN_CHECK_LOADED void* ret = _lantern_argsort_tensor_bool_intt_bool(self, stable, dim, descending); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_Tensor_argsort_tensor_bool_intt_bool)(void* self, void* stable, void* dim, void* descending); HOST_API void* lantern_Tensor_argsort_tensor_bool_intt_bool(void* self, void* stable, void* dim, void* descending) { void* ret = _lantern_Tensor_argsort_tensor_bool_intt_bool(self, stable, dim, descending); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_argsort_out_tensor_tensor_bool_intt_bool)(void* out, void* self, void* stable, void* dim, void* descending); + HOST_API void* lantern_argsort_out_tensor_tensor_bool_intt_bool(void* out, void* self, void* stable, void* dim, void* descending) { LANTERN_CHECK_LOADED void* ret = _lantern_argsort_out_tensor_tensor_bool_intt_bool(out, self, stable, dim, descending); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_argsort_tensor_dimname_bool)(void* self, void* dim, void* descending); HOST_API void* lantern_argsort_tensor_dimname_bool(void* self, void* dim, void* descending) { LANTERN_CHECK_LOADED void* ret = _lantern_argsort_tensor_dimname_bool(self, dim, descending); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_Tensor_argsort_tensor_dimname_bool)(void* self, void* dim, void* descending); @@ -7020,108 +7210,134 @@ HOST_API 
void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__foreach_add_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_add__tensorlist_scalar)(void* self, void* scalar); HOST_API void* lantern__foreach_add__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_tensorlist_scalar)(void* self, void* scalar); - HOST_API void* lantern__foreach_sub_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub__tensorlist_scalar)(void* self, void* scalar); - HOST_API void* lantern__foreach_sub__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_tensorlist_scalar)(void* self, void* scalar); - HOST_API void* lantern__foreach_mul_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul__tensorlist_scalar)(void* self, void* scalar); - HOST_API void* lantern__foreach_mul__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_tensorlist_scalar)(void* self, void* scalar); - HOST_API void* lantern__foreach_div_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_div__tensorlist_scalar)(void* self, void* scalar); - HOST_API void* lantern__foreach_div__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_tensorlist_scalar)(void* self, void* scalar); - HOST_API void* lantern__foreach_clamp_min_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min__tensorlist_scalar)(void* self, void* scalar); - HOST_API void* lantern__foreach_clamp_min__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_tensorlist_scalar)(void* self, void* scalar); - HOST_API void* lantern__foreach_clamp_max_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max__tensorlist_scalar)(void* self, void* scalar); - HOST_API void* 
lantern__foreach_clamp_max__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_tensorlist_scalar)(void* self, void* scalar); - HOST_API void* lantern__foreach_maximum_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum__tensorlist_scalar)(void* self, void* scalar); - HOST_API void* lantern__foreach_maximum__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_tensorlist_scalar)(void* self, void* scalar); - HOST_API void* lantern__foreach_minimum_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum__tensorlist_scalar)(void* self, void* scalar); - HOST_API void* lantern__foreach_minimum__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_tensorlist_tensorlist_scalar)(void* self, void* other, void* alpha); HOST_API void* lantern__foreach_add_tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_tensorlist_tensorlist_scalar(self, other, alpha); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_add__tensorlist_tensorlist_scalar)(void* self, void* other, void* alpha); HOST_API void* lantern__foreach_add__tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add__tensorlist_tensorlist_scalar(self, other, alpha); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_tensorlist_arrayrefscalar)(void* self, void* scalars); + HOST_API void* lantern__foreach_add_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_add__tensorlist_arrayrefscalar)(void* self, void* scalars); + HOST_API void* lantern__foreach_add__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_tensorlist_tensor_scalar)(void* self, void* other, void* alpha); + HOST_API void* lantern__foreach_add_tensorlist_tensor_scalar(void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_tensorlist_tensor_scalar(self, other, alpha); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_add__tensorlist_tensor_scalar)(void* self, void* other, void* alpha); + HOST_API void* lantern__foreach_add__tensorlist_tensor_scalar(void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = 
_lantern__foreach_add__tensorlist_tensor_scalar(self, other, alpha); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_sub_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub__tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_sub__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_tensorlist_tensorlist_scalar)(void* self, void* other, void* alpha); HOST_API void* lantern__foreach_sub_tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_tensorlist_tensorlist_scalar(self, other, alpha); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub__tensorlist_tensorlist_scalar)(void* self, void* other, void* alpha); HOST_API void* lantern__foreach_sub__tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub__tensorlist_tensorlist_scalar(self, other, alpha); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_tensorlist_arrayrefscalar)(void* self, void* scalars); + HOST_API void* lantern__foreach_sub_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub__tensorlist_arrayrefscalar)(void* self, void* scalars); + HOST_API void* lantern__foreach_sub__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_mul_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul__tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_mul__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_tensorlist_tensorlist)(void* self, void* other); HOST_API void* lantern__foreach_mul_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul__tensorlist_tensorlist)(void* self, void* other); HOST_API void* lantern__foreach_mul__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_tensorlist_arrayrefscalar)(void* self, void* scalars); + HOST_API void* 
lantern__foreach_mul_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul__tensorlist_arrayrefscalar)(void* self, void* scalars); + HOST_API void* lantern__foreach_mul__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_tensorlist_tensor)(void* self, void* other); + HOST_API void* lantern__foreach_mul_tensorlist_tensor(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_tensorlist_tensor(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul__tensorlist_tensor)(void* self, void* other); + HOST_API void* lantern__foreach_mul__tensorlist_tensor(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul__tensorlist_tensor(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_div_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_div__tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_div__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_tensorlist_tensorlist)(void* self, void* other); HOST_API void* lantern__foreach_div_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_div__tensorlist_tensorlist)(void* self, void* other); HOST_API void* lantern__foreach_div__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_tensorlist_tensorlist)(void* self, void* other); - HOST_API void* lantern__foreach_clamp_min_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min__tensorlist_tensorlist)(void* self, void* other); - HOST_API void* lantern__foreach_clamp_min__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_tensorlist_tensorlist)(void* self, void* other); - HOST_API void* lantern__foreach_clamp_max_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max__tensorlist_tensorlist)(void* self, void* other); - HOST_API void* 
lantern__foreach_clamp_max__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_tensorlist_tensorlist)(void* self, void* other); - HOST_API void* lantern__foreach_maximum_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum__tensorlist_tensorlist)(void* self, void* other); - HOST_API void* lantern__foreach_maximum__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_tensorlist_tensorlist)(void* self, void* other); - HOST_API void* lantern__foreach_minimum_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum__tensorlist_tensorlist)(void* self, void* other); - HOST_API void* lantern__foreach_minimum__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_add_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_add__tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_add__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_sub_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub__tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_sub__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_div_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_div__tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_div__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - 
LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_mul_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul__tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_mul__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_clamp_min_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min__tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_clamp_min__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_tensorlist_tensor)(void* self, void* other); + HOST_API void* lantern__foreach_div_tensorlist_tensor(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_tensorlist_tensor(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_div__tensorlist_tensor)(void* self, void* other); + HOST_API void* lantern__foreach_div__tensorlist_tensor(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div__tensorlist_tensor(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_clamp_max_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max__tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_clamp_max__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_clamp_max_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max__tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_clamp_max__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_clamp_max_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED 
void* ret = _lantern__foreach_clamp_max_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max__tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_clamp_max__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_clamp_min_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min__tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_clamp_min__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_clamp_min_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min__tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_clamp_min__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_tensorlist_arrayrefscalar)(void* self, void* scalars); + HOST_API void* lantern__foreach_clamp_min_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min__tensorlist_arrayrefscalar)(void* self, void* scalars); + HOST_API void* lantern__foreach_clamp_min__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_maximum_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum__tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_maximum__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_maximum_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR 
_lantern__foreach_maximum__tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_maximum__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_maximum_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum__tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_maximum__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_minimum_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum__tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_minimum__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_minimum_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum__tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_minimum__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_minimum_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum__tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_minimum__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_exp_tensorlist)(void* self); - HOST_API void* lantern__foreach_exp_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_exp_tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_zero__tensorlist)(void* self); - HOST_API void* lantern__foreach_zero__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_zero__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR 
_lantern__foreach_exp__tensorlist)(void* self); - HOST_API void* lantern__foreach_exp__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_exp__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sqrt_tensorlist)(void* self); - HOST_API void* lantern__foreach_sqrt_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sqrt_tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sqrt__tensorlist)(void* self); - HOST_API void* lantern__foreach_sqrt__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sqrt__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value); + HOST_API void* lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value); + HOST_API void* lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); 
LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value); + HOST_API void* lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value); + HOST_API void* lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_abs_tensorlist)(void* self); HOST_API void* lantern__foreach_abs_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_abs_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_abs__tensorlist)(void* self); @@ -7158,6 +7374,10 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__foreach_erfc_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_erfc_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_erfc__tensorlist)(void* 
self); HOST_API void* lantern__foreach_erfc__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_erfc__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_exp_tensorlist)(void* self); + HOST_API void* lantern__foreach_exp_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_exp_tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_exp__tensorlist)(void* self); + HOST_API void* lantern__foreach_exp__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_exp__tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_expm1_tensorlist)(void* self); HOST_API void* lantern__foreach_expm1_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_expm1_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_expm1__tensorlist)(void* self); @@ -7166,6 +7386,22 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__foreach_floor_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_floor_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_floor__tensorlist)(void* self); HOST_API void* lantern__foreach_floor__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_floor__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_frac_tensorlist)(void* self); + HOST_API void* lantern__foreach_frac_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_frac_tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_frac__tensorlist)(void* self); + HOST_API void* lantern__foreach_frac__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_frac__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_tensorlist_tensorlist_tensorlist)(void* self, void* tensors1, void* weights); + HOST_API void* lantern__foreach_lerp_tensorlist_tensorlist_tensorlist(void* self, void* tensors1, void* weights) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_tensorlist_tensorlist_tensorlist(self, tensors1, weights); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp__tensorlist_tensorlist_tensorlist)(void* self, void* tensors1, void* weights); + HOST_API void* lantern__foreach_lerp__tensorlist_tensorlist_tensorlist(void* self, void* tensors1, void* weights) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp__tensorlist_tensorlist_tensorlist(self, tensors1, weights); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_tensorlist_tensorlist_scalar)(void* self, void* tensors1, void* weight); + HOST_API void* lantern__foreach_lerp_tensorlist_tensorlist_scalar(void* self, void* tensors1, void* weight) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_tensorlist_tensorlist_scalar(self, tensors1, weight); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp__tensorlist_tensorlist_scalar)(void* self, void* tensors1, void* weight); + HOST_API void* lantern__foreach_lerp__tensorlist_tensorlist_scalar(void* self, void* tensors1, void* weight) { LANTERN_CHECK_LOADED void* ret = 
_lantern__foreach_lerp__tensorlist_tensorlist_scalar(self, tensors1, weight); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_lgamma_tensorlist)(void* self); + HOST_API void* lantern__foreach_lgamma_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lgamma_tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_lgamma__tensorlist)(void* self); + HOST_API void* lantern__foreach_lgamma__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lgamma__tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_log_tensorlist)(void* self); HOST_API void* lantern__foreach_log_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_log_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_log__tensorlist)(void* self); @@ -7182,84 +7418,74 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__foreach_log2_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_log2_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_log2__tensorlist)(void* self); HOST_API void* lantern__foreach_log2__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_log2__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_max_tensorlist)(void* self); + HOST_API void* lantern__foreach_max_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_max_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_neg_tensorlist)(void* self); HOST_API void* lantern__foreach_neg_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_neg_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_neg__tensorlist)(void* self); HOST_API void* lantern__foreach_neg__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_neg__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_tan_tensorlist)(void* self); - HOST_API void* lantern__foreach_tan_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tan_tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_tan__tensorlist)(void* self); - HOST_API void* lantern__foreach_tan__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tan__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_tanh_tensorlist)(void* self); - HOST_API void* lantern__foreach_tanh_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tanh_tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_tanh__tensorlist)(void* self); - HOST_API void* lantern__foreach_tanh__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tanh__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sin_tensorlist)(void* self); - HOST_API void* lantern__foreach_sin_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sin_tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR 
_lantern__foreach_sin__tensorlist)(void* self);
- HOST_API void* lantern__foreach_sin__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sin__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_sinh_tensorlist)(void* self);
- HOST_API void* lantern__foreach_sinh_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sinh_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_sinh__tensorlist)(void* self);
- HOST_API void* lantern__foreach_sinh__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sinh__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_round_tensorlist)(void* self);
- HOST_API void* lantern__foreach_round_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_round_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_round__tensorlist)(void* self);
- HOST_API void* lantern__foreach_round__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_round__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_lgamma_tensorlist)(void* self);
- HOST_API void* lantern__foreach_lgamma_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lgamma_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_lgamma__tensorlist)(void* self);
- HOST_API void* lantern__foreach_lgamma__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lgamma__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_frac_tensorlist)(void* self);
- HOST_API void* lantern__foreach_frac_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_frac_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_frac__tensorlist)(void* self);
- HOST_API void* lantern__foreach_frac__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_frac__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_norm_tensorlist_scalar_scalartype)(void* self, void* ord, void* dtype);
+ HOST_API void* lantern__foreach_norm_tensorlist_scalar_scalartype(void* self, void* ord, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_norm_tensorlist_scalar_scalartype(self, ord, dtype); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow_tensorlist_tensorlist)(void* self, void* exponent);
+ HOST_API void* lantern__foreach_pow_tensorlist_tensorlist(void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow_tensorlist_tensorlist(self, exponent); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow_tensorlist_scalar)(void* self, void* exponent);
+ HOST_API void* lantern__foreach_pow_tensorlist_scalar(void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow_tensorlist_scalar(self, exponent); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow_tensorlist_arrayrefscalar)(void* self, void* exponent);
+ HOST_API void* lantern__foreach_pow_tensorlist_arrayrefscalar(void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow_tensorlist_arrayrefscalar(self, exponent); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow_scalar_tensorlist)(void* self, void* exponent);
+ HOST_API void* lantern__foreach_pow_scalar_tensorlist(void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow_scalar_tensorlist(self, exponent); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow__tensorlist_tensorlist)(void* self, void* exponent);
+ HOST_API void* lantern__foreach_pow__tensorlist_tensorlist(void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow__tensorlist_tensorlist(self, exponent); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow__tensorlist_scalar)(void* self, void* exponent);
+ HOST_API void* lantern__foreach_pow__tensorlist_scalar(void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow__tensorlist_scalar(self, exponent); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow__tensorlist_arrayrefscalar)(void* self, void* exponent);
+ HOST_API void* lantern__foreach_pow__tensorlist_arrayrefscalar(void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow__tensorlist_arrayrefscalar(self, exponent); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__foreach_reciprocal_tensorlist)(void* self);
 HOST_API void* lantern__foreach_reciprocal_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_reciprocal_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__foreach_reciprocal__tensorlist)(void* self);
 HOST_API void* lantern__foreach_reciprocal__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_reciprocal__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_round_tensorlist)(void* self);
+ HOST_API void* lantern__foreach_round_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_round_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_round__tensorlist)(void* self);
+ HOST_API void* lantern__foreach_round__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_round__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__foreach_sigmoid_tensorlist)(void* self);
 HOST_API void* lantern__foreach_sigmoid_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sigmoid_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__foreach_sigmoid__tensorlist)(void* self);
 HOST_API void* lantern__foreach_sigmoid__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sigmoid__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sign_tensorlist)(void* self);
+ HOST_API void* lantern__foreach_sign_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sign_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sign__tensorlist)(void* self);
+ HOST_API void* lantern__foreach_sign__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sign__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sin_tensorlist)(void* self);
+ HOST_API void* lantern__foreach_sin_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sin_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sin__tensorlist)(void* self);
+ HOST_API void* lantern__foreach_sin__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sin__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sinh_tensorlist)(void* self);
+ HOST_API void* lantern__foreach_sinh_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sinh_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sinh__tensorlist)(void* self);
+ HOST_API void* lantern__foreach_sinh__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sinh__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sqrt_tensorlist)(void* self);
+ HOST_API void* lantern__foreach_sqrt_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sqrt_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sqrt__tensorlist)(void* self);
+ HOST_API void* lantern__foreach_sqrt__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sqrt__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_tan_tensorlist)(void* self);
+ HOST_API void* lantern__foreach_tan_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tan_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_tan__tensorlist)(void* self);
+ HOST_API void* lantern__foreach_tan__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tan__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_tanh_tensorlist)(void* self);
+ HOST_API void* lantern__foreach_tanh_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tanh_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_tanh__tensorlist)(void* self);
+ HOST_API void* lantern__foreach_tanh__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tanh__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__foreach_trunc_tensorlist)(void* self);
 HOST_API void* lantern__foreach_trunc_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_trunc_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__foreach_trunc__tensorlist)(void* self);
 HOST_API void* lantern__foreach_trunc__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_trunc__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value);
- HOST_API void* lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value);
- HOST_API void* lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value);
- HOST_API void* lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value);
- HOST_API void* lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_norm_tensorlist_scalar)(void* self, void* ord);
- HOST_API void* lantern__foreach_norm_tensorlist_scalar(void* self, void* ord) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_norm_tensorlist_scalar(self, ord); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_tensorlist_tensorlist_tensorlist)(void* self, void* tensors1, void* weights);
- HOST_API void* lantern__foreach_lerp_tensorlist_tensorlist_tensorlist(void* self, void* tensors1, void* weights) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_tensorlist_tensorlist_tensorlist(self, tensors1, weights); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp__tensorlist_tensorlist_tensorlist)(void* self, void* tensors1, void* weights);
- HOST_API void* lantern__foreach_lerp__tensorlist_tensorlist_tensorlist(void* self, void* tensors1, void* weights) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp__tensorlist_tensorlist_tensorlist(self, tensors1, weights); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_tensorlist_tensorlist_scalar)(void* self, void* tensors1, void* weight);
- HOST_API void* lantern__foreach_lerp_tensorlist_tensorlist_scalar(void* self, void* tensors1, void* weight) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_tensorlist_tensorlist_scalar(self, tensors1, weight); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp__tensorlist_tensorlist_scalar)(void* self, void* tensors1, void* weight);
- HOST_API void* lantern__foreach_lerp__tensorlist_tensorlist_scalar(void* self, void* tensors1, void* weight) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp__tensorlist_tensorlist_scalar(self, tensors1, weight); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_zero__tensorlist)(void* self);
+ HOST_API void* lantern__foreach_zero__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_zero__tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_copy__tensorlist_tensorlist_bool)(void* self, void* src, void* non_blocking);
+ HOST_API void* lantern__foreach_copy__tensorlist_tensorlist_bool(void* self, void* src, void* non_blocking) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_copy__tensorlist_tensorlist_bool(self, src, non_blocking); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_copy_tensorlist_tensorlist_bool)(void* self, void* src, void* non_blocking);
+ HOST_API void* lantern__foreach_copy_tensorlist_tensorlist_bool(void* self, void* src, void* non_blocking) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_copy_tensorlist_tensorlist_bool(self, src, non_blocking); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_bucketize_tensor_tensor_bool_bool)(void* self, void* boundaries, void* out_int32, void* right);
 HOST_API void* lantern_bucketize_tensor_tensor_bool_bool(void* self, void* boundaries, void* out_int32, void* right) { LANTERN_CHECK_LOADED void* ret = _lantern_bucketize_tensor_tensor_bool_bool(self, boundaries, out_int32, right); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_bucketize_out_tensor_tensor_tensor_bool_bool)(void* out, void* self, void* boundaries, void* out_int32, void* right);
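[Editor's note, not part of the diff] Every entry in this generated header follows the same lazy-binding shape: the LANTERN_API line declares a function pointer named _lantern_* that is filled in when the lantern shared library is loaded, and the HOST_API wrapper refuses to call through an unresolved pointer before forwarding. A minimal, self-contained C++ sketch of that pattern is below; libtorch_add and the exception are stand-ins, since the real LANTERN_CHECK_LOADED / LANTERN_HOST_HANDLER macros expand to project-specific error handling.

    #include <stdexcept>

    // Stand-in for one _lantern_* entry point: a function pointer that
    // stays null until the shared library has been dlopen'd / LoadLibrary'd.
    void* (*_libtorch_add)(void* a, void* b) = nullptr;

    // Host-side wrapper mirroring HOST_API: guard the pointer, forward the
    // call, and hand back the opaque result (error propagation elided).
    void* libtorch_add(void* a, void* b) {
      if (_libtorch_add == nullptr)  // stand-in for LANTERN_CHECK_LOADED
        throw std::runtime_error("lantern is not loaded");
      return _libtorch_add(a, b);
    }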
@@ -7272,6 +7498,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_searchsorted_out_tensor_tensor_tensor_bool_bool_cstringview_tensor(void* out, void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter) { LANTERN_CHECK_LOADED void* ret = _lantern_searchsorted_out_tensor_tensor_tensor_bool_bool_cstringview_tensor(out, sorted_sequence, self, out_int32, right, side, sorter); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_searchsorted_tensor_scalar_bool_bool_cstringview_tensor)(void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter);
 HOST_API void* lantern_searchsorted_tensor_scalar_bool_bool_cstringview_tensor(void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter) { LANTERN_CHECK_LOADED void* ret = _lantern_searchsorted_tensor_scalar_bool_bool_cstringview_tensor(sorted_sequence, self, out_int32, right, side, sorter); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor)(void* out, void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter);
+ HOST_API void* lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor(void* out, void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter) { LANTERN_CHECK_LOADED void* ret = _lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor(out, sorted_sequence, self, out_int32, right, side, sorter); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__convert_indices_from_coo_to_csr_tensor_intt_bool)(void* self, void* size, void* out_int32);
 HOST_API void* lantern__convert_indices_from_coo_to_csr_tensor_intt_bool(void* self, void* size, void* out_int32) { LANTERN_CHECK_LOADED void* ret = _lantern__convert_indices_from_coo_to_csr_tensor_intt_bool(self, size, out_int32); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__convert_indices_from_coo_to_csr_out_tensor_tensor_intt_bool)(void* out, void* self, void* size, void* out_int32);
@@ -8132,6 +8360,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_linalg_eig_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_linalg_eig_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_linalg_eig_out_tensor_tensor_tensor)(void* eigenvalues, void* eigenvectors, void* self);
 HOST_API void* lantern_linalg_eig_out_tensor_tensor_tensor(void* eigenvalues, void* eigenvectors, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_linalg_eig_out_tensor_tensor_tensor(eigenvalues, eigenvectors, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__linalg_eigvals_tensor)(void* self);
+ HOST_API void* lantern__linalg_eigvals_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__linalg_eigvals_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_linalg_eigvals_tensor)(void* self);
 HOST_API void* lantern_linalg_eigvals_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_linalg_eigvals_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_linalg_eigvals_out_tensor_tensor)(void* out, void* self);
@@ -8250,6 +8480,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_linalg_solve_ex_out_tensor_tensor_tensor_tensor_bool_bool(void* result, void* info, void* A, void* B, void* left, void* check_errors) { LANTERN_CHECK_LOADED void* ret = _lantern_linalg_solve_ex_out_tensor_tensor_tensor_tensor_bool_bool(result, info, A, B, left, check_errors); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_linalg_solve_tensor_tensor_bool)(void* A, void* B, void* left);
 HOST_API void* lantern_linalg_solve_tensor_tensor_bool(void* A, void* B, void* left) { LANTERN_CHECK_LOADED void* ret = _lantern_linalg_solve_tensor_tensor_bool(A, B, left); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__spsolve_tensor_tensor_bool)(void* A, void* B, void* left);
+ HOST_API void* lantern__spsolve_tensor_tensor_bool(void* A, void* B, void* left) { LANTERN_CHECK_LOADED void* ret = _lantern__spsolve_tensor_tensor_bool(A, B, left); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_linalg_solve_out_tensor_tensor_tensor_bool)(void* out, void* A, void* B, void* left);
 HOST_API void* lantern_linalg_solve_out_tensor_tensor_tensor_bool(void* out, void* A, void* B, void* left) { LANTERN_CHECK_LOADED void* ret = _lantern_linalg_solve_out_tensor_tensor_tensor_bool(out, A, B, left); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_linalg_tensorinv_tensor_intt)(void* self, void* ind);
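[Editor's note] The three hunks above are purely additive — new entry points for the searchsorted out-variant, _linalg_eigvals, and _spsolve — so existing callers are unaffected. Each new declaration also needs a matching lookup when the shared library is opened. A hedged, POSIX-only sketch of that lookup (the real loader code is generated elsewhere in the project; the names load_lantern_symbol and lantern_unary_fn are assumptions):

    #include <dlfcn.h>

    // One slot per symbol, mirroring the LANTERN_API pointer declarations.
    typedef void* (*lantern_unary_fn)(void*);

    // Resolve a newly added symbol by name from an already-opened handle.
    // Returning false leaves the pointer null, so the HOST_API guard fires
    // instead of the process crashing through an unresolved call.
    bool load_lantern_symbol(void* handle, const char* name, lantern_unary_fn* slot) {
      *slot = reinterpret_cast<lantern_unary_fn>(dlsym(handle, name));
      return *slot != nullptr;
    }

    // e.g. load_lantern_symbol(lib, "_lantern__linalg_eigvals_tensor", &_lantern__linalg_eigvals_tensor);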
@@ -8292,6 +8524,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_nested_to_padded_tensor_tensor_double_intarrayref(void* self, void* padding, void* output_size) { LANTERN_CHECK_LOADED void* ret = _lantern_nested_to_padded_tensor_tensor_double_intarrayref(self, padding, output_size); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__test_serialization_subcmul_tensor_tensor_scalar)(void* self, void* other, void* alpha);
 HOST_API void* lantern__test_serialization_subcmul_tensor_tensor_scalar(void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__test_serialization_subcmul_tensor_tensor_scalar(self, other, alpha); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__test_parallel_materialize_tensor_intt_bool)(void* self, void* num_parallel, void* skip_first);
+ HOST_API void* lantern__test_parallel_materialize_tensor_intt_bool(void* self, void* num_parallel, void* skip_first) { LANTERN_CHECK_LOADED void* ret = _lantern__test_parallel_materialize_tensor_intt_bool(self, num_parallel, skip_first); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__test_optional_intlist_tensor_intarrayref)(void* values, void* addends);
 HOST_API void* lantern__test_optional_intlist_tensor_intarrayref(void* values, void* addends) { LANTERN_CHECK_LOADED void* ret = _lantern__test_optional_intlist_tensor_intarrayref(values, addends); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__test_optional_filled_intlist_tensor_intarrayref)(void* values, void* addends);
@@ -8318,8 +8552,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_segment_reduce_tensor_cstringview_tensor_tensor_tensor_intt_bool_scalar(void* data, void* reduce, void* lengths, void* indices, void* offsets, void* axis, void* unsafe, void* initial) { LANTERN_CHECK_LOADED void* ret = _lantern_segment_reduce_tensor_cstringview_tensor_tensor_tensor_intt_bool_scalar(data, reduce, lengths, indices, offsets, axis, unsafe, initial); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__segment_reduce_backward_tensor_tensor_tensor_cstringview_tensor_tensor_intt_scalar)(void* grad, void* output, void* data, void* reduce, void* lengths, void* offsets, void* axis, void* initial);
 HOST_API void* lantern__segment_reduce_backward_tensor_tensor_tensor_cstringview_tensor_tensor_intt_scalar(void* grad, void* output, void* data, void* reduce, void* lengths, void* offsets, void* axis, void* initial) { LANTERN_CHECK_LOADED void* ret = _lantern__segment_reduce_backward_tensor_tensor_tensor_cstringview_tensor_tensor_intt_scalar(grad, output, data, reduce, lengths, offsets, axis, initial); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_pad_sequence_tensorlist_bool_double)(void* sequences, void* batch_first, void* padding_value);
- HOST_API void* lantern_pad_sequence_tensorlist_bool_double(void* sequences, void* batch_first, void* padding_value) { LANTERN_CHECK_LOADED void* ret = _lantern_pad_sequence_tensorlist_bool_double(sequences, batch_first, padding_value); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_pad_sequence_tensorlist_bool_double_cstringview)(void* sequences, void* batch_first, void* padding_value, void* padding_side);
+ HOST_API void* lantern_pad_sequence_tensorlist_bool_double_cstringview(void* sequences, void* batch_first, void* padding_value, void* padding_side) { LANTERN_CHECK_LOADED void* ret = _lantern_pad_sequence_tensorlist_bool_double_cstringview(sequences, batch_first, padding_value, padding_side); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_flatten_dense_tensors_tensorlist)(void* tensors);
 HOST_API void* lantern_flatten_dense_tensors_tensorlist(void* tensors) { LANTERN_CHECK_LOADED void* ret = _lantern_flatten_dense_tensors_tensorlist(tensors); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_unflatten_dense_tensors_tensor_tensorlist)(void* flat, void* tensors);
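[Editor's note] The pad_sequence hunk above is the one signature change in this stretch: the three-argument binding is replaced by a four-argument one whose trailing padding_side is a string (upstream PyTorch accepts "left" or "right", defaulting to "right"), so call sites must pass the extra argument. A hedged sketch of an adapted call site; to_lantern_string is a hypothetical helper standing in for however lantern boxes a string into the void* ABI:

    // Declared in this header after the change:
    void* lantern_pad_sequence_tensorlist_bool_double_cstringview(
        void* sequences, void* batch_first, void* padding_value, void* padding_side);
    void* to_lantern_string(const char* s);  // hypothetical boxing helper

    // Old three-argument call sites gain an explicit padding side.
    void* pad_batch(void* sequences, void* batch_first, void* padding_value) {
      void* padding_side = to_lantern_string("right");  // upstream default
      return lantern_pad_sequence_tensorlist_bool_double_cstringview(
          sequences, batch_first, padding_value, padding_side);
    }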
@@ -8406,50 +8640,64 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_alias_copy_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_alias_copy_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_padded_tensor_tensor_double_intarrayref)(void* self, void* padding, void* output_size);
 HOST_API void* lantern_Tensor_to_padded_tensor_tensor_double_intarrayref(void* self, void* padding, void* output_size) { void* ret = _lantern_Tensor_to_padded_tensor_tensor_double_intarrayref(self, padding, output_size); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__jagged_to_padded_dense_forward_tensor_tensorlist_intarrayref_double)(void* values, void* offsets, void* max_lengths, void* padding_value);
+ HOST_API void* lantern__jagged_to_padded_dense_forward_tensor_tensorlist_intarrayref_double(void* values, void* offsets, void* max_lengths, void* padding_value) { LANTERN_CHECK_LOADED void* ret = _lantern__jagged_to_padded_dense_forward_tensor_tensorlist_intarrayref_double(values, offsets, max_lengths, padding_value); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__padded_dense_to_jagged_forward_tensor_tensorlist_intt)(void* dense, void* offsets, void* total_L);
+ HOST_API void* lantern__padded_dense_to_jagged_forward_tensor_tensorlist_intt(void* dense, void* offsets, void* total_L) { LANTERN_CHECK_LOADED void* ret = _lantern__padded_dense_to_jagged_forward_tensor_tensorlist_intt(dense, offsets, total_L); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__nested_tensor_softmax_with_shape_tensor_tensor)(void* self, void* query);
 HOST_API void* lantern__nested_tensor_softmax_with_shape_tensor_tensor(void* self, void* query) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_tensor_softmax_with_shape_tensor_tensor(self, query); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__safe_softmax_tensor_intt_scalartype)(void* self, void* dim, void* dtype);
+ HOST_API void* lantern__safe_softmax_tensor_intt_scalartype(void* self, void* dim, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__safe_softmax_tensor_intt_scalartype(self, dim, dtype); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__transformer_encoder_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt)(void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* mask_type);
 HOST_API void* lantern__transformer_encoder_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt(void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* mask_type) { LANTERN_CHECK_LOADED void* ret = _lantern__transformer_encoder_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__native_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_bool_bool_intt)(void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask, void* need_weights, void* average_attn_weights, void* mask_type);
 HOST_API void* lantern__native_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_bool_bool_intt(void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask, void* need_weights, void* average_attn_weights, void* mask_type) { LANTERN_CHECK_LOADED void* ret = _lantern__native_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_bool_bool_intt(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal);
- HOST_API void* lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal) { LANTERN_CHECK_LOADED void* ret = _lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool(query, key, value, attn_mask, dropout_p, is_causal); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_bool)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* need_attn_weights, void* is_causal);
- HOST_API void* lantern__scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* need_attn_weights, void* is_causal) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_bool(query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal);
- HOST_API void* lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool(query, key, value, attn_mask, dropout_p, is_causal); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask);
- HOST_API void* lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool)(void* query, void* key, void* value, void* dropout_p, void* is_causal, void* return_debug_mask);
- HOST_API void* lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool(void* query, void* key, void* value, void* dropout_p, void* is_causal, void* return_debug_mask) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool(query, key, value, dropout_p, is_causal, return_debug_mask); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt)(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset);
- HOST_API void* lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_bool_bool)(void* query, void* key, void* value, void* compute_log_sumexp, void* is_causal);
- HOST_API void* lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_bool_bool(void* query, void* key, void* value, void* compute_log_sumexp, void* is_causal) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_bool_bool(query, key, value, compute_log_sumexp, is_causal); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool)(void* grad_out_, void* query, void* key, void* value, void* out, void* logsumexp, void* is_causal, void* chunk_grad_outputs);
- HOST_API void* lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(void* grad_out_, void* query, void* key, void* value, void* out, void* logsumexp, void* is_causal, void* chunk_grad_outputs) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__chunk_grad_outputs_efficient_attention_tensor_tensor_tensor_bool)(void* query, void* key, void* value, void* is_causal);
- HOST_API void* lantern__chunk_grad_outputs_efficient_attention_tensor_tensor_tensor_bool(void* query, void* key, void* value, void* is_causal) { LANTERN_CHECK_LOADED void* ret = _lantern__chunk_grad_outputs_efficient_attention_tensor_tensor_tensor_bool(query, key, value, is_causal); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool)(void* query, void* key, void* value, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* return_debug_mask);
- HOST_API void* lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool(void* query, void* key, void* value, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* return_debug_mask) { LANTERN_CHECK_LOADED void* ret = _lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt)(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset);
- HOST_API void* lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset) { LANTERN_CHECK_LOADED void* ret = _lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_bool_bool)(void* query, void* key, void* value, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* compute_log_sumexp, void* causal);
- HOST_API void* lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_bool_bool(void* query, void* key, void* value, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* compute_log_sumexp, void* causal) { LANTERN_CHECK_LOADED void* ret = _lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_bool_bool(query, key, value, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, compute_log_sumexp, causal); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool)(void* grad_out_, void* query, void* key, void* value, void* out, void* logsumexp, void* is_causal, void* chunk_grad_outputs);
- HOST_API void* lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(void* grad_out_, void* query, void* key, void* value, void* out, void* logsumexp, void* is_causal, void* chunk_grad_outputs) { LANTERN_CHECK_LOADED void* ret = _lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_double_bool)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* scale, void* enable_gqa);
+ HOST_API void* lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* scale, void* enable_gqa) { LANTERN_CHECK_LOADED void* ret = _lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_double_bool(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool_double_bool)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* scale, void* enable_gqa);
+ HOST_API void* lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* scale, void* enable_gqa) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool_double_bool(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor_double_bool)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask, void* scale, void* enable_gqa);
+ HOST_API void* lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask, void* scale, void* enable_gqa) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor_double_bool(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale, enable_gqa); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_attention_math_for_mps_tensor_tensor_tensor_tensor_double_bool_tensor_double)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask, void* scale);
+ HOST_API void* lantern__scaled_dot_product_attention_math_for_mps_tensor_tensor_tensor_tensor_double_bool_tensor_double(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_attention_math_for_mps_tensor_tensor_tensor_tensor_double_bool_tensor_double(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool_double)(void* query, void* key, void* value, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale);
+ HOST_API void* lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool_double(void* query, void* key, void* value, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool_double(query, key, value, dropout_p, is_causal, return_debug_mask, scale); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_flash_attention_for_cpu_tensor_tensor_tensor_double_bool_tensor_double)(void* query, void* key, void* value, void* dropout_p, void* is_causal, void* attn_mask, void* scale);
+ HOST_API void* lantern__scaled_dot_product_flash_attention_for_cpu_tensor_tensor_tensor_double_bool_tensor_double(void* query, void* key, void* value, void* dropout_p, void* is_causal, void* attn_mask, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_flash_attention_for_cpu_tensor_tensor_tensor_double_bool_tensor_double(query, key, value, dropout_p, is_causal, attn_mask, scale); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_fused_attention_overrideable_tensor_tensor_tensor_tensor_double_bool_bool_double)(void* query, void* key, void* value, void* attn_bias, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale);
+ HOST_API void* lantern__scaled_dot_product_fused_attention_overrideable_tensor_tensor_tensor_tensor_double_bool_bool_double(void* query, void* key, void* value, void* attn_bias, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_fused_attention_overrideable_tensor_tensor_tensor_tensor_double_bool_bool_double(query, key, value, attn_bias, dropout_p, is_causal, return_debug_mask, scale); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double)(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale);
+ HOST_API void* lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_flash_attention_for_cpu_backward_tensor_tensor_tensor_tensor_tensor_tensor_double_bool_tensor_double)(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* dropout_p, void* is_causal, void* attn_mask, void* scale);
+ HOST_API void* lantern__scaled_dot_product_flash_attention_for_cpu_backward_tensor_tensor_tensor_tensor_tensor_tensor_double_bool_tensor_double(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* dropout_p, void* is_causal, void* attn_mask, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_flash_attention_for_cpu_backward_tensor_tensor_tensor_tensor_tensor_tensor_double_bool_tensor_double(grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask, scale); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_fused_attention_overrideable_backward_tensor_tensor_tensor_tensor_tensor_stdarraybool_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double)(void* grad_out, void* query, void* key, void* value, void* attn_bias, void* grad_input_mask, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale);
+ HOST_API void* lantern__scaled_dot_product_fused_attention_overrideable_backward_tensor_tensor_tensor_tensor_tensor_stdarraybool_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double(void* grad_out, void* query, void* key, void* value, void* attn_bias, void* grad_input_mask, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_fused_attention_overrideable_backward_tensor_tensor_tensor_tensor_tensor_stdarraybool_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double(grad_out, query, key, value, attn_bias, grad_input_mask, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_tensor_bool_double_bool_double)(void* query, void* key, void* value, void* attn_bias, void* compute_log_sumexp, void* dropout_p, void* is_causal, void* scale);
+ HOST_API void* lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_tensor_bool_double_bool_double(void* query, void* key, void* value, void* attn_bias, void* compute_log_sumexp, void* dropout_p, void* is_causal, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_tensor_bool_double_bool_double(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, scale); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_stdarraybool_bool_double)(void* grad_out_, void* query, void* key, void* value, void* attn_bias, void* out, void* logsumexp, void* philox_seed, void* philox_offset, void* dropout_p, void* grad_input_mask, void* is_causal, void* scale);
+ HOST_API void* lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_stdarraybool_bool_double(void* grad_out_, void* query, void* key, void* value, void* attn_bias, void* out, void* logsumexp, void* philox_seed, void* philox_offset, void* dropout_p, void* grad_input_mask, void* is_causal, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_stdarraybool_bool_double(grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal, scale); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_cudnn_attention_tensor_tensor_tensor_tensor_bool_double_bool_bool_double)(void* query, void* key, void* value, void* attn_bias, void* compute_log_sumexp, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale);
+ HOST_API void* lantern__scaled_dot_product_cudnn_attention_tensor_tensor_tensor_tensor_bool_double_bool_bool_double(void* query, void* key, void* value, void* attn_bias, void* compute_log_sumexp, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_cudnn_attention_tensor_tensor_tensor_tensor_bool_double_bool_bool_double(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, scale); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_cudnn_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_double)(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* philox_seed, void* philox_offset, void* attn_bias, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* scale);
+ HOST_API void* lantern__scaled_dot_product_cudnn_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_double(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* philox_seed, void* philox_offset, void* attn_bias, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_cudnn_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_double(grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool_double_intt_intt_tensor_tensor)(void* query, void* key, void* value, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale, void* window_size_left, void* window_size_right, void* seqused_k, void* alibi_slopes);
+ HOST_API void* lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool_double_intt_intt_tensor_tensor(void* query, void* key, void* value, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale, void* window_size_left, void* window_size_right, void* seqused_k, void* alibi_slopes) { LANTERN_CHECK_LOADED void* ret = _lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool_double_intt_intt_tensor_tensor(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale, window_size_left, window_size_right, seqused_k, alibi_slopes); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double_intt_intt)(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale, void* window_size_left, void* window_size_right);
+ HOST_API void* lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double_intt_intt(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale, void* window_size_left, void* window_size_right) { LANTERN_CHECK_LOADED void* ret = _lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double_intt_intt(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale, window_size_left, window_size_right); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_intt_bool_double_tensor_intt)(void* query, void* key, void* value, void* bias, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* max_seqlen_k, void* dropout_p, void* custom_mask_type, void* compute_log_sumexp, void* scale, void* seqlen_k, void* window_size);
+ HOST_API void* lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_intt_bool_double_tensor_intt(void* query, void* key, void* value, void* bias, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* max_seqlen_k, void* dropout_p, void* custom_mask_type, void* compute_log_sumexp, void* scale, void* seqlen_k, void* window_size) { LANTERN_CHECK_LOADED void* ret = _lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_intt_bool_double_tensor_intt(query, key, value, bias, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, custom_mask_type, compute_log_sumexp, scale, seqlen_k, window_size); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_double_tensor_tensor_intt_bool_double_intt_intt_bool)(void* grad_out_, void* query, void* key, void* value, void* bias, void* out, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* max_seqlen_k, void* logsumexp, void* dropout_p, void* philox_seed, void* philox_offset, void* custom_mask_type, void* bias_requires_grad, void* scale, void* num_splits_key, void* window_size, void* shared_storage_dqdkdv);
+ HOST_API void* lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_double_tensor_tensor_intt_bool_double_intt_intt_bool(void* grad_out_, void* query, void* key, void* value, void* bias, void* out, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* max_seqlen_k, void* logsumexp, void* dropout_p, void* philox_seed, void* philox_offset, void* custom_mask_type, void* bias_requires_grad, void* scale, void* num_splits_key, void* window_size, void* shared_storage_dqdkdv) { LANTERN_CHECK_LOADED void* ret = _lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_double_tensor_tensor_intt_bool_double_intt_intt_bool(grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key, window_size, shared_storage_dqdkdv); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__triton_scaled_dot_attention_tensor_tensor_tensor_double)(void* q, void* k, void* v, void* dropout_p);
 HOST_API void* lantern__triton_scaled_dot_attention_tensor_tensor_tensor_double(void* q, void* k, void* v, void* dropout_p) { LANTERN_CHECK_LOADED void* ret = _lantern__triton_scaled_dot_attention_tensor_tensor_tensor_double(q, k, v, dropout_p); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fill_mem_eff_dropout_mask__tensor_double_intt_intt)(void* self, void* dropout_p, void* seed, void* offset);
+ HOST_API void* lantern__fill_mem_eff_dropout_mask__tensor_double_intt_intt(void* self, void* dropout_p, void* seed, void* offset) { LANTERN_CHECK_LOADED void* ret = _lantern__fill_mem_eff_dropout_mask__tensor_double_intt_intt(self, dropout_p, seed, offset); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__triton_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor)(void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask);
 HOST_API void* lantern__triton_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor(void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask) { LANTERN_CHECK_LOADED void* ret = _lantern__triton_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_special_airy_ai_tensor)(void* x);
 HOST_API void* lantern_special_airy_ai_tensor(void* x) { LANTERN_CHECK_LOADED void* ret = _lantern_special_airy_ai_tensor(x); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_special_airy_ai_out_tensor_tensor)(void* out, void* x);
 HOST_API void* lantern_special_airy_ai_out_tensor_tensor(void* out, void* x) { LANTERN_CHECK_LOADED void* ret = _lantern_special_airy_ai_out_tensor_tensor(out, x); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__transformer_decoder_only_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor)(void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* incr_key, void* incr_value);
- HOST_API void* lantern__transformer_decoder_only_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* incr_key, void* incr_value) { LANTERN_CHECK_LOADED void* ret = _lantern__transformer_decoder_only_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__native_decoder_only_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool)(void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask, void* incr_key, void* incr_value, void* need_weights, void* average_attn_weights);
- HOST_API void* lantern__native_decoder_only_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask, void* incr_key, void* incr_value, void* need_weights, void* average_attn_weights) { LANTERN_CHECK_LOADED void* ret = _lantern__native_decoder_only_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_special_bessel_j0_tensor)(void* self);
 HOST_API void* lantern_special_bessel_j0_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_special_bessel_j0_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_special_bessel_j0_out_tensor_tensor)(void* out, void* self);
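[Editor's note] The long hunk above swaps the whole scaled-dot-product-attention family for variants that thread through an optional scale and, on the public entry point, an enable_gqa flag, and adds the cuDNN, CPU-flash, and overrideable backends. For a caller, the visible change is two extra trailing arguments. A hedged compatibility sketch; null_optional_double and make_bool are hypothetical helpers standing in for however lantern boxes an optional<double> and a bool into the void* ABI:

    // Declared in this header after the change:
    void* lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_double_bool(
        void* query, void* key, void* value, void* attn_mask,
        void* dropout_p, void* is_causal, void* scale, void* enable_gqa);
    void* null_optional_double();  // hypothetical: boxed "no explicit scale"
    void* make_bool(bool b);       // hypothetical: boxed bool

    // Adapt an old six-argument call site to the new eight-argument ABI.
    void* sdpa_compat(void* q, void* k, void* v, void* mask,
                      void* dropout_p, void* is_causal) {
      return lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_double_bool(
          q, k, v, mask, dropout_p, is_causal,
          null_optional_double(),  // scale unset -> 1/sqrt(head_dim) upstream
          make_bool(false));       // enable_gqa off unless using grouped-query heads
    }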
@@ -8642,8 +8890,20 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern__foobar_tensor_bool_bool_bool(void* self, void* arg1, void* arg2, void* arg3) { LANTERN_CHECK_LOADED void* ret = _lantern__foobar_tensor_bool_bool_bool(self, arg1, arg2, arg3); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
 HOST_API void* lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
 HOST_API void* lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor)(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor)(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_adagrad__tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor)(void* self, void* grads, void* state_sums, void* state_steps, void* lr, void* lr_decay, void* weight_decay, void* eps, void* maximize, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_adagrad__tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(void* self, void* grads, void* state_sums, void* state_steps, void* lr, void* lr_decay, void* weight_decay, void* eps, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adagrad__tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__propagate_xla_data_tensor_tensor)(void* input, void* output);
+ HOST_API void* lantern__propagate_xla_data_tensor_tensor(void* input, void* output) { LANTERN_CHECK_LOADED void* ret = _lantern__propagate_xla_data_tensor_tensor(input, output); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__new_zeros_with_same_feature_meta_out_tensor_tensor_tensor_intt)(void* out, void* self, void* other, void* self_num_batch_dims);
 HOST_API void* lantern__new_zeros_with_same_feature_meta_out_tensor_tensor_tensor_intt(void* out, void* self, void* other, void* self_num_batch_dims) { LANTERN_CHECK_LOADED void* ret = 
_lantern__new_zeros_with_same_feature_meta_out_tensor_tensor_tensor_intt(out, self, other, self_num_batch_dims); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__cudnn_ctc_loss_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intt_bool_bool)(void* out0, void* out1, void* log_probs, void* targets, void* input_lengths, void* target_lengths, void* blank, void* deterministic, void* zero_infinity); @@ -8672,6 +8932,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_add_out_tensor_tensor_scalar_scalar(void* out, void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern_add_out_tensor_tensor_scalar_scalar(out, self, other, alpha); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_affine_grid_generator_out_tensor_tensor_intarrayref_bool)(void* out, void* theta, void* size, void* align_corners); HOST_API void* lantern_affine_grid_generator_out_tensor_tensor_intarrayref_bool(void* out, void* theta, void* size, void* align_corners) { LANTERN_CHECK_LOADED void* ret = _lantern_affine_grid_generator_out_tensor_tensor_intarrayref_bool(out, theta, size, align_corners); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__test_functorch_fallback_out_tensor_tensor_tensor)(void* out, void* self, void* other); + HOST_API void* lantern__test_functorch_fallback_out_tensor_tensor_tensor(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__test_functorch_fallback_out_tensor_tensor_tensor(out, self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_bartlett_window_out_tensor_intt)(void* out, void* window_length); HOST_API void* lantern_bartlett_window_out_tensor_intt(void* out, void* window_length) { LANTERN_CHECK_LOADED void* ret = _lantern_bartlett_window_out_tensor_intt(out, window_length); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_bartlett_window_out_tensor_intt_bool)(void* out, void* window_length, void* periodic); @@ -8726,8 +8988,6 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_cudnn_batch_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double(void* out0, void* out1, void* out2, void* out3, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* exponential_average_factor, void* epsilon) { LANTERN_CHECK_LOADED void* ret = _lantern_cudnn_batch_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double(out0, out1, out2, out3, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_cudnn_batch_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_tensor)(void* out0, void* out1, void* out2, void* input, void* grad_output, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_var, void* epsilon, void* reserveSpace); HOST_API void* lantern_cudnn_batch_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_tensor(void* out0, void* out1, void* out2, void* input, void* grad_output, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_var, void* epsilon, void* reserveSpace) { LANTERN_CHECK_LOADED void* ret = 
_lantern_cudnn_batch_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_tensor(out0, out1, out2, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)(void* out, void* self, void* weight, void* padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32); - HOST_API void* lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(void* out, void* self, void* weight, void* padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32) { LANTERN_CHECK_LOADED void* ret = _lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(out, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_cudnn_convolution_transpose_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)(void* out, void* self, void* weight, void* padding, void* output_padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32); HOST_API void* lantern_cudnn_convolution_transpose_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(void* out, void* self, void* weight, void* padding, void* output_padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32) { LANTERN_CHECK_LOADED void* ret = _lantern_cudnn_convolution_transpose_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(out, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__mps_convolution_transpose_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt)(void* out, void* self, void* weight, void* padding, void* output_padding, void* stride, void* dilation, void* groups); @@ -8774,6 +9034,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__embedding_bag_per_sample_weights_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt(void* out, void* grad, void* weight, void* indices, void* offsets, void* offset2bag, void* mode, void* padding_idx) { LANTERN_CHECK_LOADED void* ret = _lantern__embedding_bag_per_sample_weights_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt(out, grad, weight, indices, offsets, offset2bag, mode, padding_idx); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_empty_out_tensor_intarrayref_dimnamelist_memoryformat)(void* out, void* size, void* names, void* memory_format); HOST_API void* lantern_empty_out_tensor_intarrayref_dimnamelist_memoryformat(void* out, void* size, void* names, void* memory_format) { LANTERN_CHECK_LOADED void* ret = _lantern_empty_out_tensor_intarrayref_dimnamelist_memoryformat(out, size, names, memory_format); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_empty_permuted_out_tensor_intarrayref_intarrayref)(void* out, void* size, void* physical_layout); + HOST_API void* 
lantern_empty_permuted_out_tensor_intarrayref_intarrayref(void* out, void* size, void* physical_layout) { LANTERN_CHECK_LOADED void* ret = _lantern_empty_permuted_out_tensor_intarrayref_intarrayref(out, size, physical_layout); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_new_empty_out_tensor_tensor_intarrayref)(void* out, void* self, void* size); HOST_API void* lantern_new_empty_out_tensor_tensor_intarrayref(void* out, void* self, void* size) { LANTERN_CHECK_LOADED void* ret = _lantern_new_empty_out_tensor_tensor_intarrayref(out, self, size); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_new_empty_strided_out_tensor_tensor_intarrayref_intarrayref)(void* out, void* self, void* size, void* stride); @@ -8806,6 +9068,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_fill_out_tensor_tensor_scalar(void* out, void* self, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern_fill_out_tensor_tensor_scalar(out, self, value); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_fill_out_tensor_tensor_tensor)(void* out, void* self, void* value); HOST_API void* lantern_fill_out_tensor_tensor_tensor(void* out, void* self, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern_fill_out_tensor_tensor_tensor(out, self, value); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_floor_divide_out_tensor_tensor_scalar)(void* out, void* self, void* other); + HOST_API void* lantern_floor_divide_out_tensor_tensor_scalar(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern_floor_divide_out_tensor_tensor_scalar(out, self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_full_out_tensor_intarrayref_scalar_dimnamelist)(void* out, void* size, void* fill_value, void* names); HOST_API void* lantern_full_out_tensor_intarrayref_scalar_dimnamelist(void* out, void* size, void* fill_value, void* names) { LANTERN_CHECK_LOADED void* ret = _lantern_full_out_tensor_intarrayref_scalar_dimnamelist(out, size, fill_value, names); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_full_like_out_tensor_tensor_scalar_memoryformat)(void* out, void* self, void* fill_value, void* memory_format); @@ -8844,12 +9108,12 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_native_group_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_intt_intt_double(void* out0, void* out1, void* out2, void* input, void* weight, void* bias, void* N, void* C, void* HxW, void* group, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern_native_group_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_intt_intt_double(out0, out1, out2, input, weight, bias, N, C, HxW, group, eps); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_native_group_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_intt_intt_stdarraybool)(void* out0, void* out1, void* out2, void* grad_out, void* input, void* mean, void* rstd, void* weight, void* N, void* C, void* HxW, void* group, void* output_mask); HOST_API void* lantern_native_group_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_intt_intt_stdarraybool(void* out0, void* out1, void* out2, void* grad_out, void* input, void* mean, void* rstd, void* weight, void* N, void* C, void* HxW, void* group, void* output_mask) { 
LANTERN_CHECK_LOADED void* ret = _lantern_native_group_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_intt_intt_stdarraybool(out0, out1, out2, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_index_put_out_tensor_tensor_constclistcoptionaltensor_tensor_bool)(void* out, void* self, void* indices, void* values, void* accumulate); - HOST_API void* lantern_index_put_out_tensor_tensor_constclistcoptionaltensor_tensor_bool(void* out, void* self, void* indices, void* values, void* accumulate) { LANTERN_CHECK_LOADED void* ret = _lantern_index_put_out_tensor_tensor_constclistcoptionaltensor_tensor_bool(out, self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__index_put_impl_out_tensor_tensor_constclistcoptionaltensor_tensor_bool_bool)(void* out, void* self, void* indices, void* values, void* accumulate, void* unsafe); - HOST_API void* lantern__index_put_impl_out_tensor_tensor_constclistcoptionaltensor_tensor_bool_bool(void* out, void* self, void* indices, void* values, void* accumulate, void* unsafe) { LANTERN_CHECK_LOADED void* ret = _lantern__index_put_impl_out_tensor_tensor_constclistcoptionaltensor_tensor_bool_bool(out, self, indices, values, accumulate, unsafe); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__index_put_impl_tensor_constclistcoptionaltensor_tensor_bool_bool)(void* self, void* indices, void* values, void* accumulate, void* unsafe); - HOST_API void* lantern__index_put_impl_tensor_constclistcoptionaltensor_tensor_bool_bool(void* self, void* indices, void* values, void* accumulate, void* unsafe) { LANTERN_CHECK_LOADED void* ret = _lantern__index_put_impl_tensor_constclistcoptionaltensor_tensor_bool_bool(self, indices, values, accumulate, unsafe); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_index_put_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool)(void* out, void* self, void* indices, void* values, void* accumulate); + HOST_API void* lantern_index_put_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool(void* out, void* self, void* indices, void* values, void* accumulate) { LANTERN_CHECK_LOADED void* ret = _lantern_index_put_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool(out, self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__index_put_impl_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool_bool)(void* out, void* self, void* indices, void* values, void* accumulate, void* unsafe); + HOST_API void* lantern__index_put_impl_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool_bool(void* out, void* self, void* indices, void* values, void* accumulate, void* unsafe) { LANTERN_CHECK_LOADED void* ret = _lantern__index_put_impl_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool_bool(out, self, indices, values, accumulate, unsafe); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__index_put_impl_tensor_constcliststdoptionaltensor_tensor_bool_bool)(void* self, void* indices, void* values, void* accumulate, void* unsafe); + HOST_API void* lantern__index_put_impl_tensor_constcliststdoptionaltensor_tensor_bool_bool(void* self, void* indices, void* values, void* accumulate, void* unsafe) { LANTERN_CHECK_LOADED void* ret = _lantern__index_put_impl_tensor_constcliststdoptionaltensor_tensor_bool_bool(self, 
indices, values, accumulate, unsafe); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_isnan_out_tensor_tensor)(void* out, void* self); HOST_API void* lantern_isnan_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_isnan_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_native_layer_norm_out_tensor_tensor_tensor_tensor_intarrayref_tensor_tensor_double)(void* out0, void* out1, void* out2, void* input, void* normalized_shape, void* weight, void* bias, void* eps); @@ -8886,6 +9150,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_quantized_max_pool1d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* out, void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_CHECK_LOADED void* ret = _lantern_quantized_max_pool1d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(out, self, kernel_size, stride, padding, dilation, ceil_mode); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_quantized_max_pool2d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)(void* out, void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode); HOST_API void* lantern_quantized_max_pool2d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* out, void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_CHECK_LOADED void* ret = _lantern_quantized_max_pool2d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(out, self, kernel_size, stride, padding, dilation, ceil_mode); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_quantized_max_pool3d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)(void* out, void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode); + HOST_API void* lantern_quantized_max_pool3d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* out, void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_CHECK_LOADED void* ret = _lantern_quantized_max_pool3d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(out, self, kernel_size, stride, padding, dilation, ceil_mode); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_median_out_tensor_tensor)(void* out, void* self); HOST_API void* lantern_median_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_median_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_nanmedian_out_tensor_tensor)(void* out, void* self); @@ -8920,6 +9186,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_mul_out_tensor_tensor_scalar(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern_mul_out_tensor_tensor_scalar(out, self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__native_batch_norm_legit_functional_tensor_tensor_tensor_tensor_tensor_bool_double_double)(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* momentum, void* eps); HOST_API void* 
lantern__native_batch_norm_legit_functional_tensor_tensor_tensor_tensor_tensor_bool_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__native_batch_norm_legit_functional_tensor_tensor_tensor_tensor_tensor_bool_double_double(input, weight, bias, running_mean, running_var, training, momentum, eps); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__native_batch_norm_legit_no_training_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double)(void* out0, void* out1, void* out2, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps); + HOST_API void* lantern__native_batch_norm_legit_no_training_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(void* out0, void* out1, void* out2, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__native_batch_norm_legit_no_training_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(out0, out1, out2, input, weight, bias, running_mean, running_var, momentum, eps); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_stats_out_tensor_tensor_tensor_double)(void* out0, void* out1, void* input, void* eps); HOST_API void* lantern_batch_norm_stats_out_tensor_tensor_tensor_double(void* out0, void* out1, void* input, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_stats_out_tensor_tensor_tensor_double(out0, out1, input, eps); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_gather_stats_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double_intt)(void* out0, void* out1, void* input, void* mean, void* invstd, void* running_mean, void* running_var, void* momentum, void* eps, void* count); @@ -8930,8 +9198,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_native_batch_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool(void* out0, void* out1, void* out2, void* grad_out, void* input, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_invstd, void* train, void* eps, void* output_mask) { LANTERN_CHECK_LOADED void* ret = _lantern_native_batch_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool(out0, out1, out2, grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_backward_reduce_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool_bool)(void* out0, void* out1, void* out2, void* out3, void* grad_out, void* input, void* mean, void* invstd, void* weight, void* input_g, void* weight_g, void* bias_g); HOST_API void* lantern_batch_norm_backward_reduce_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool_bool(void* out0, void* out1, void* out2, void* out3, void* grad_out, void* input, void* mean, void* invstd, void* weight, void* input_g, void* weight_g, void* bias_g) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_backward_reduce_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool_bool(out0, out1, out2, out3, 
grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor)(void* out, void* grad_out, void* input, void* mean, void* invstd, void* weight, void* mean_dy, void* mean_dy_xmu, void* count); - HOST_API void* lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* out, void* grad_out, void* input, void* mean, void* invstd, void* weight, void* mean_dy, void* mean_dy_xmu, void* count) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(out, grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor)(void* out, void* grad_out, void* input, void* mean, void* invstd, void* weight, void* sum_dy, void* sum_dy_xmu, void* count); + HOST_API void* lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* out, void* grad_out, void* input, void* mean, void* invstd, void* weight, void* sum_dy, void* sum_dy_xmu, void* count) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(out, grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_update_stats_out_tensor_tensor_tensor_tensor_tensor_double)(void* out0, void* out1, void* input, void* running_mean, void* running_var, void* momentum); HOST_API void* lantern_batch_norm_update_stats_out_tensor_tensor_tensor_tensor_tensor_double(void* out0, void* out1, void* input, void* running_mean, void* running_var, void* momentum) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_update_stats_out_tensor_tensor_tensor_tensor_tensor_double(out0, out1, input, running_mean, running_var, momentum); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__nnpack_spatial_convolution_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref)(void* out, void* input, void* weight, void* bias, void* padding, void* stride); @@ -9004,8 +9272,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_unsafe_split_with_sizes_out_tensorlist_tensor_intarrayref_intt(void* out, void* self, void* split_sizes, void* dim) { LANTERN_CHECK_LOADED void* ret = _lantern_unsafe_split_with_sizes_out_tensorlist_tensor_intarrayref_intt(out, self, split_sizes, dim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_sum_out_tensor_tensor_scalartype)(void* out, void* self, void* dtype); HOST_API void* lantern_sum_out_tensor_tensor_scalartype(void* out, void* self, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_sum_out_tensor_tensor_scalartype(out, self, dtype); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_std_mean_out_tensor_tensor_tensor_intarrayref_intt_bool)(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim); - HOST_API void* lantern_std_mean_out_tensor_tensor_tensor_intarrayref_intt_bool(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = 
_lantern_std_mean_out_tensor_tensor_tensor_intarrayref_intt_bool(out0, out1, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_std_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool)(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim); + HOST_API void* lantern_std_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool(out0, out1, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_prod_out_tensor_tensor_scalartype)(void* out, void* self, void* dtype); HOST_API void* lantern_prod_out_tensor_tensor_scalartype(void* out, void* self, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_prod_out_tensor_tensor_scalartype(out, self, dtype); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__mkldnn_transpose_out_tensor_tensor_intt_intt)(void* out, void* self, void* dim0, void* dim1); @@ -9026,10 +9294,16 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__nested_tensor_size_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_tensor_size_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__nested_tensor_strides_out_tensor_tensor)(void* out, void* self); HOST_API void* lantern__nested_tensor_strides_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_tensor_strides_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__nested_tensor_storage_offsets_out_tensor_tensor)(void* out, void* self); + HOST_API void* lantern__nested_tensor_storage_offsets_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_tensor_storage_offsets_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__nested_from_padded_and_nested_example_out_tensor_tensor_tensor)(void* out, void* padded, void* nt_example); HOST_API void* lantern__nested_from_padded_and_nested_example_out_tensor_tensor_tensor(void* out, void* padded, void* nt_example) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_from_padded_and_nested_example_out_tensor_tensor_tensor(out, padded, nt_example); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_intarrayref)(void* out, void* self, void* nested_size, void* nested_strides, void* offsets); - HOST_API void* lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_intarrayref(void* out, void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_intarrayref(out, self, nested_size, nested_strides, offsets); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_tensor)(void* out, void* self, void* nested_size, void* nested_strides, void* offsets); + HOST_API void* lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_tensor(void* out, void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_CHECK_LOADED 
void* ret = _lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_tensor(out, self, nested_size, nested_strides, offsets); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_jagged_copy_out_tensor_tensor_tensor_tensor_tensor_intt_tensor_tensor)(void* out, void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen); + HOST_API void* lantern__nested_view_from_jagged_copy_out_tensor_tensor_tensor_tensor_tensor_intt_tensor_tensor(void* out, void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_jagged_copy_out_tensor_tensor_tensor_tensor_tensor_intt_tensor_tensor(out, self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__nested_get_values_copy_out_tensor_tensor)(void* out, void* self); + HOST_API void* lantern__nested_get_values_copy_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_values_copy_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__trilinear_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt)(void* out, void* i1, void* i2, void* i3, void* expand1, void* expand2, void* expand3, void* sumdim, void* unroll_dim); HOST_API void* lantern__trilinear_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt(void* out, void* i1, void* i2, void* i3, void* expand1, void* expand2, void* expand3, void* sumdim, void* unroll_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__trilinear_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt(out, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__unique_out_tensor_tensor_tensor_bool_bool)(void* out0, void* out1, void* self, void* sorted, void* return_inverse); @@ -9044,8 +9318,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__unique2_out_tensor_tensor_tensor_tensor_bool_bool_bool(void* out0, void* out1, void* out2, void* self, void* sorted, void* return_inverse, void* return_counts) { LANTERN_CHECK_LOADED void* ret = _lantern__unique2_out_tensor_tensor_tensor_tensor_bool_bool_bool(out0, out1, out2, self, sorted, return_inverse, return_counts); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__unsafe_view_out_tensor_tensor_intarrayref)(void* out, void* self, void* size); HOST_API void* lantern__unsafe_view_out_tensor_tensor_intarrayref(void* out, void* self, void* size) { LANTERN_CHECK_LOADED void* ret = _lantern__unsafe_view_out_tensor_tensor_intarrayref(out, self, size); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_var_mean_out_tensor_tensor_tensor_intarrayref_intt_bool)(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim); - HOST_API void* lantern_var_mean_out_tensor_tensor_tensor_intarrayref_intt_bool(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_out_tensor_tensor_tensor_intarrayref_intt_bool(out0, out1, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR 
_lantern_var_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool)(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim); + HOST_API void* lantern_var_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool(out0, out1, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__weight_norm_interface_out_tensor_tensor_tensor_tensor_intt)(void* out0, void* out1, void* v, void* g, void* dim); HOST_API void* lantern__weight_norm_interface_out_tensor_tensor_tensor_tensor_intt(void* out0, void* out1, void* v, void* g, void* dim) { LANTERN_CHECK_LOADED void* ret = _lantern__weight_norm_interface_out_tensor_tensor_tensor_tensor_intt(out0, out1, v, g, dim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__weight_norm_interface_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_intt)(void* out0, void* out1, void* grad_w, void* saved_v, void* saved_g, void* saved_norms, void* dim); @@ -9072,6 +9346,10 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_native_norm_out_tensor_tensor_scalar(void* out, void* self, void* p) { LANTERN_CHECK_LOADED void* ret = _lantern_native_norm_out_tensor_tensor_scalar(out, self, p); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_native_norm_out_tensor_tensor_scalar_intarrayref_bool_scalartype)(void* out, void* self, void* p, void* dim, void* keepdim, void* dtype); HOST_API void* lantern_native_norm_out_tensor_tensor_scalar_intarrayref_bool_scalartype(void* out, void* self, void* p, void* dim, void* keepdim, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_native_norm_out_tensor_tensor_scalar_intarrayref_bool_scalartype(out, self, p, dim, keepdim, dtype); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__batch_norm_with_update_functional_tensor_tensor_tensor_tensor_tensor_double_double)(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps); + HOST_API void* lantern__batch_norm_with_update_functional_tensor_tensor_tensor_tensor_tensor_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__batch_norm_with_update_functional_tensor_tensor_tensor_tensor_tensor_double_double(input, weight, bias, running_mean, running_var, momentum, eps); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__batch_norm_no_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double)(void* out0, void* out1, void* out2, void* out3, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps); + HOST_API void* lantern__batch_norm_no_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(void* out0, void* out1, void* out2, void* out3, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__batch_norm_no_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(out0, out1, out2, out3, input, weight, bias, running_mean, running_var, momentum, eps); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* 
(LANTERN_PTR _lantern__sparse_sum_out_tensor_tensor_intarrayref)(void* out, void* self, void* dim); HOST_API void* lantern__sparse_sum_out_tensor_tensor_intarrayref(void* out, void* self, void* dim) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_sum_out_tensor_tensor_intarrayref(out, self, dim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__sparse_sum_backward_out_tensor_tensor_tensor_intarrayref)(void* out, void* grad, void* self, void* dim); @@ -9120,8 +9398,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_sparse_coo_tensor_out_tensor_intarrayref(void* out, void* size) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_coo_tensor_out_tensor_intarrayref(out, size); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_with_dims_out_tensor_intt_intt_intarrayref)(void* out, void* sparse_dim, void* dense_dim, void* size); HOST_API void* lantern__sparse_coo_tensor_with_dims_out_tensor_intt_intt_intarrayref(void* out, void* sparse_dim, void* dense_dim, void* size) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_with_dims_out_tensor_intt_intt_intarrayref(out, sparse_dim, dense_dim, size); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor)(void* out, void* sparse_dim, void* dense_dim, void* size, void* indices, void* values); - HOST_API void* lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor(void* out, void* sparse_dim, void* dense_dim, void* size, void* indices, void* values) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor(out, sparse_dim, dense_dim, size, indices, values); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor_bool)(void* out, void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* is_coalesced); + HOST_API void* lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor_bool(void* out, void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* is_coalesced) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor_bool(out, sparse_dim, dense_dim, size, indices, values, is_coalesced); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_sparse_resize_out_tensor_tensor_intarrayref_intt_intt)(void* out, void* self, void* size, void* sparse_dim, void* dense_dim); HOST_API void* lantern_sparse_resize_out_tensor_tensor_intarrayref_intt_intt(void* out, void* self, void* size, void* sparse_dim, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_resize_out_tensor_tensor_intarrayref_intt_intt(out, self, size, sparse_dim, dense_dim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_sparse_resize_tensor_intarrayref_intt_intt)(void* self, void* size, void* sparse_dim, void* dense_dim); @@ -9132,8 +9410,10 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_sparse_resize_and_clear_tensor_intarrayref_intt_intt(void* self, void* size, void* sparse_dim, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = 
_lantern_sparse_resize_and_clear_tensor_intarrayref_intt_intt(self, size, sparse_dim, dense_dim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_sparse_mask_out_tensor_tensor_tensor)(void* out, void* self, void* mask); HOST_API void* lantern_sparse_mask_out_tensor_tensor_tensor(void* out, void* self, void* mask) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_mask_out_tensor_tensor_tensor(out, self, mask); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__to_dense_out_tensor_tensor_scalartype)(void* out, void* self, void* dtype); - HOST_API void* lantern__to_dense_out_tensor_tensor_scalartype(void* out, void* self, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__to_dense_out_tensor_tensor_scalartype(out, self, dtype); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__sparse_mask_projection_out_tensor_tensor_tensor_bool)(void* out, void* self, void* mask, void* accumulate_matches); + HOST_API void* lantern__sparse_mask_projection_out_tensor_tensor_tensor_bool(void* out, void* self, void* mask, void* accumulate_matches) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_mask_projection_out_tensor_tensor_tensor_bool(out, self, mask, accumulate_matches); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__to_dense_out_tensor_tensor_scalartype_bool)(void* out, void* self, void* dtype, void* masked_grad); + HOST_API void* lantern__to_dense_out_tensor_tensor_scalartype_bool(void* out, void* self, void* dtype, void* masked_grad) { LANTERN_CHECK_LOADED void* ret = _lantern__to_dense_out_tensor_tensor_scalartype_bool(out, self, dtype, masked_grad); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__coalesce_out_tensor_tensor)(void* out, void* self); HOST_API void* lantern__coalesce_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__coalesce_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__coalesced_out_tensor_tensor_bool)(void* out, void* self, void* coalesced); @@ -9144,24 +9424,24 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_copy_sparse_to_sparse_out_tensor_tensor_tensor_bool(void* out, void* self, void* src, void* non_blocking) { LANTERN_CHECK_LOADED void* ret = _lantern_copy_sparse_to_sparse_out_tensor_tensor_tensor_bool(out, self, src, non_blocking); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_copy_sparse_to_sparse_tensor_tensor_bool)(void* self, void* src, void* non_blocking); HOST_API void* lantern_copy_sparse_to_sparse_tensor_tensor_bool(void* self, void* src, void* non_blocking) { LANTERN_CHECK_LOADED void* ret = _lantern_copy_sparse_to_sparse_tensor_tensor_bool(self, src, non_blocking); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_to_sparse_out_tensor_tensor_intt)(void* out, void* self, void* sparse_dim); - HOST_API void* lantern_to_sparse_out_tensor_tensor_intt(void* out, void* self, void* sparse_dim) { LANTERN_CHECK_LOADED void* ret = _lantern_to_sparse_out_tensor_tensor_intt(out, self, sparse_dim); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_to_sparse_out_tensor_tensor_layout_intarrayref_intt)(void* out, void* self, void* layout, void* blocksize, void* dense_dim); - HOST_API void* lantern_to_sparse_out_tensor_tensor_layout_intarrayref_intt(void* out, void* self, void* layout, void* blocksize, void* 
dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern_to_sparse_out_tensor_tensor_layout_intarrayref_intt(out, self, layout, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_to_sparse_csr_out_tensor_tensor_intt)(void* out, void* self, void* dense_dim); - HOST_API void* lantern_to_sparse_csr_out_tensor_tensor_intt(void* out, void* self, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern_to_sparse_csr_out_tensor_tensor_intt(out, self, dense_dim); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_to_sparse_csc_out_tensor_tensor_intt)(void* out, void* self, void* dense_dim); - HOST_API void* lantern_to_sparse_csc_out_tensor_tensor_intt(void* out, void* self, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern_to_sparse_csc_out_tensor_tensor_intt(out, self, dense_dim); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_to_sparse_bsr_out_tensor_tensor_intarrayref_intt)(void* out, void* self, void* blocksize, void* dense_dim); - HOST_API void* lantern_to_sparse_bsr_out_tensor_tensor_intarrayref_intt(void* out, void* self, void* blocksize, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern_to_sparse_bsr_out_tensor_tensor_intarrayref_intt(out, self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_to_sparse_bsc_out_tensor_tensor_intarrayref_intt)(void* out, void* self, void* blocksize, void* dense_dim); - HOST_API void* lantern_to_sparse_bsc_out_tensor_tensor_intarrayref_intt(void* out, void* self, void* blocksize, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern_to_sparse_bsc_out_tensor_tensor_intarrayref_intt(out, self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__to_sparse_out_tensor_tensor_intt)(void* out, void* self, void* sparse_dim); + HOST_API void* lantern__to_sparse_out_tensor_tensor_intt(void* out, void* self, void* sparse_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__to_sparse_out_tensor_tensor_intt(out, self, sparse_dim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__to_sparse_out_tensor_tensor_layout_intarrayref_intt)(void* out, void* self, void* layout, void* blocksize, void* dense_dim); + HOST_API void* lantern__to_sparse_out_tensor_tensor_layout_intarrayref_intt(void* out, void* self, void* layout, void* blocksize, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__to_sparse_out_tensor_tensor_layout_intarrayref_intt(out, self, layout, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__to_sparse_csr_out_tensor_tensor_intt)(void* out, void* self, void* dense_dim); + HOST_API void* lantern__to_sparse_csr_out_tensor_tensor_intt(void* out, void* self, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__to_sparse_csr_out_tensor_tensor_intt(out, self, dense_dim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__to_sparse_csc_out_tensor_tensor_intt)(void* out, void* self, void* dense_dim); + HOST_API void* lantern__to_sparse_csc_out_tensor_tensor_intt(void* out, void* self, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__to_sparse_csc_out_tensor_tensor_intt(out, self, dense_dim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__to_sparse_bsr_out_tensor_tensor_intarrayref_intt)(void* out, void* self, void* blocksize, void* dense_dim); + HOST_API void* 
lantern__to_sparse_bsr_out_tensor_tensor_intarrayref_intt(void* out, void* self, void* blocksize, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__to_sparse_bsr_out_tensor_tensor_intarrayref_intt(out, self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__to_sparse_bsc_out_tensor_tensor_intarrayref_intt)(void* out, void* self, void* blocksize, void* dense_dim); + HOST_API void* lantern__to_sparse_bsc_out_tensor_tensor_intarrayref_intt(void* out, void* self, void* blocksize, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__to_sparse_bsc_out_tensor_tensor_intarrayref_intt(out, self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_to_mkldnn_out_tensor_tensor_scalartype)(void* out, void* self, void* dtype); HOST_API void* lantern_to_mkldnn_out_tensor_tensor_scalartype(void* out, void* self, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_to_mkldnn_out_tensor_tensor_scalartype(out, self, dtype); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_mkldnn_reorder_conv2d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref)(void* out, void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size); HOST_API void* lantern_mkldnn_reorder_conv2d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(void* out, void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_reorder_conv2d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(out, self, padding, stride, dilation, groups, input_size); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt)(void* out, void* self, void* padding, void* stride, void* dilation, void* groups); - HOST_API void* lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt(void* out, void* self, void* padding, void* stride, void* dilation, void* groups) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt(out, self, padding, stride, dilation, groups); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref)(void* out, void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size); + HOST_API void* lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(void* out, void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(out, self, padding, stride, dilation, groups, input_size); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_quantize_per_tensor_dynamic_out_tensor_tensor_scalartype_bool)(void* out, void* self, void* dtype, void* reduce_range); HOST_API void* lantern_quantize_per_tensor_dynamic_out_tensor_tensor_scalartype_bool(void* out, void* self, void* dtype, void* reduce_range) { LANTERN_CHECK_LOADED void* ret = _lantern_quantize_per_tensor_dynamic_out_tensor_tensor_scalartype_bool(out, self, dtype, reduce_range); 
LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_quantize_per_tensor_out_tensor_tensor_double_intt_scalartype)(void* out, void* self, void* scale, void* zero_point, void* dtype);
@@ -9320,8 +9600,6 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern__histogramdd_from_bin_tensors_out_tensor_tensor_tensorlist_tensor_bool(void* out, void* self, void* bins, void* weight, void* density) { LANTERN_CHECK_LOADED void* ret = _lantern__histogramdd_from_bin_tensors_out_tensor_tensor_tensorlist_tensor_bool(out, self, bins, weight, density); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_remainder_out_tensor_scalar_tensor)(void* out, void* self, void* other);
HOST_API void* lantern_remainder_out_tensor_scalar_tensor(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern_remainder_out_tensor_scalar_tensor(out, self, other); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_argsort_out_tensor_tensor_bool_intt_bool)(void* out, void* self, void* stable, void* dim, void* descending);
- HOST_API void* lantern_argsort_out_tensor_tensor_bool_intt_bool(void* out, void* self, void* stable, void* dim, void* descending) { LANTERN_CHECK_LOADED void* ret = _lantern_argsort_out_tensor_tensor_bool_intt_bool(out, self, stable, dim, descending); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_unfold_backward_out_tensor_tensor_intarrayref_intt_intt_intt)(void* out, void* grad_in, void* input_sizes, void* dim, void* size, void* step);
HOST_API void* lantern_unfold_backward_out_tensor_tensor_intarrayref_intt_intt_intt(void* out, void* grad_in, void* input_sizes, void* dim, void* size, void* step) { LANTERN_CHECK_LOADED void* ret = _lantern_unfold_backward_out_tensor_tensor_intarrayref_intt_intt_intt(out, grad_in, input_sizes, dim, size, step); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_normal_out_tensor_tensor_double_double_generator)(void* out, void* self, void* mean, void* std, void* generator);
@@ -9336,60 +9614,70 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern__amp_update_scale_tensor_tensor_tensor_double_double_intt(void* self, void* growth_tracker, void* found_inf, void* scale_growth_factor, void* scale_backoff_factor, void* growth_interval) { LANTERN_CHECK_LOADED void* ret = _lantern__amp_update_scale_tensor_tensor_tensor_double_double_intt(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
HOST_API void* lantern__foreach_add_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
- HOST_API void* lantern__foreach_sub_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
- HOST_API void* lantern__foreach_mul_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
- HOST_API void* lantern__foreach_div_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
- HOST_API void* lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
- HOST_API void* lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
- HOST_API void* lantern__foreach_maximum_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
- HOST_API void* lantern__foreach_minimum_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_out_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* other, void* alpha);
HOST_API void* lantern__foreach_add_out_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_out_tensorlist_tensorlist_tensorlist_scalar(out, self, other, alpha); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars);
+ HOST_API void* lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_out_tensorlist_tensorlist_tensor_scalar)(void* out, void* self, void* other, void* alpha);
+ HOST_API void* lantern__foreach_add_out_tensorlist_tensorlist_tensor_scalar(void* out, void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_out_tensorlist_tensorlist_tensor_scalar(out, self, other, alpha); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
+ HOST_API void* lantern__foreach_sub_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_out_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* other, void* alpha);
HOST_API void* lantern__foreach_sub_out_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_out_tensorlist_tensorlist_tensorlist_scalar(out, self, other, alpha); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars);
+ HOST_API void* lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
+ HOST_API void* lantern__foreach_mul_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other);
HOST_API void* lantern__foreach_mul_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars);
+ HOST_API void* lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_out_tensorlist_tensorlist_tensor)(void* out, void* self, void* other);
+ HOST_API void* lantern__foreach_mul_out_tensorlist_tensorlist_tensor(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_out_tensorlist_tensorlist_tensor(out, self, other); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
+ HOST_API void* lantern__foreach_div_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other);
HOST_API void* lantern__foreach_div_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other);
- HOST_API void* lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other);
- HOST_API void* lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other);
- HOST_API void* lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other);
- HOST_API void* lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars);
- HOST_API void* lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars);
- HOST_API void* lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars);
HOST_API void* lantern__foreach_div_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars);
- HOST_API void* lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars);
- HOST_API void* lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_out_tensorlist_tensorlist_tensor)(void* out, void* self, void* other);
+ HOST_API void* lantern__foreach_div_out_tensorlist_tensorlist_tensor(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_out_tensorlist_tensorlist_tensor(out, self, other); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
+ HOST_API void* lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other);
+ HOST_API void* lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars);
HOST_API void* lantern__foreach_clamp_max_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
+ HOST_API void* lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other);
+ HOST_API void* lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars);
+ HOST_API void* lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
+ HOST_API void* lantern__foreach_maximum_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other);
+ HOST_API void* lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars);
HOST_API void* lantern__foreach_maximum_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
+ HOST_API void* lantern__foreach_minimum_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other);
+ HOST_API void* lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars);
HOST_API void* lantern__foreach_minimum_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_exp_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_exp_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_exp_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_zero_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_zero_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_zero_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_zero_tensorlist)(void* self);
- HOST_API void* lantern__foreach_zero_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_zero_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_sqrt_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_sqrt_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sqrt_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* tensor1, void* tensor2, void* value);
+ HOST_API void* lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(out, self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
+ HOST_API void* lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
+ HOST_API void* lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* tensor1, void* tensor2, void* value);
+ HOST_API void* lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(out, self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
+ HOST_API void* lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
+ HOST_API void* lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_abs_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_abs_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_abs_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_acos_out_tensorlist_tensorlist)(void* out, void* self);
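Every pair in the hunks above follows the same two-line pattern: a `LANTERN_API` function-pointer slot that is resolved out of `liblantern` at load time, and a `HOST_API` wrapper that guards the call and funnels errors back across the C ABI. A minimal sketch of that pattern follows; the macro names are real, but their bodies below are illustrative stand-ins, not the actual definitions from the lantern headers:

```cpp
// Sketch of the dispatch pattern behind each LANTERN_API/HOST_API pair.
// Assumed behavior only: LANTERN_CHECK_LOADED bails out if liblantern was
// never dlopen'ed, LANTERN_HOST_HANDLER re-raises errors captured on the
// lantern side of the C boundary.
#include <stdexcept>
#include <string>

void* (*_lantern_example_op)(void* self) = nullptr;  // slot filled by LOAD_SYMBOL
bool lantern_loaded = false;                          // set once dlopen succeeds
std::string lantern_last_error;                       // set by the lantern side

void* lantern_example_op(void* self) {
  if (!lantern_loaded)                       // ~ LANTERN_CHECK_LOADED
    throw std::runtime_error("lantern is not loaded");
  void* ret = _lantern_example_op(self);     // call through the resolved pointer
  if (!lantern_last_error.empty())           // ~ LANTERN_HOST_HANDLER
    throw std::runtime_error(lantern_last_error);
  return ret;
}
```

Erasing every argument to `void*` keeps this generated surface stable across libtorch releases; what this diff churns is only the set of type-mangled names, which track the operator schemas of the new libtorch version.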
@@ -9408,10 +9696,20 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern__foreach_erf_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_erf_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_erfc_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_erfc_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_erfc_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_exp_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_exp_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_exp_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_expm1_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_expm1_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_expm1_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_floor_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_floor_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_floor_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_frac_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_frac_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_frac_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* tensors1, void* weights);
+ HOST_API void* lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist(void* out, void* self, void* tensors1, void* weights) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist(out, self, tensors1, weights); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* tensors1, void* weight);
+ HOST_API void* lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensors1, void* weight) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar(out, self, tensors1, weight); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_lgamma_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_lgamma_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lgamma_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_log_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_log_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_log_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_log10_out_tensorlist_tensorlist)(void* out, void* self);
@@ -9420,50 +9718,46 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern__foreach_log1p_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_log1p_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_log2_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_log2_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_log2_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_max_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_max_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_max_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_neg_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_neg_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_neg_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_tan_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_tan_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tan_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_tanh_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_tanh_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tanh_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_sin_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_sin_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sin_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_sinh_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_sinh_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sinh_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_round_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_round_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_round_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_lgamma_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_lgamma_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lgamma_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_frac_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_frac_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_frac_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_norm_out_tensorlist_tensorlist_scalar_scalartype)(void* out, void* self, void* ord, void* dtype);
+ HOST_API void* lantern__foreach_norm_out_tensorlist_tensorlist_scalar_scalartype(void* out, void* self, void* ord, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_norm_out_tensorlist_tensorlist_scalar_scalartype(out, self, ord, dtype); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* exponent);
+ HOST_API void* lantern__foreach_pow_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow_out_tensorlist_tensorlist_tensorlist(out, self, exponent); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* exponent);
+ HOST_API void* lantern__foreach_pow_out_tensorlist_tensorlist_scalar(void* out, void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow_out_tensorlist_tensorlist_scalar(out, self, exponent); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* exponent);
+ HOST_API void* lantern__foreach_pow_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow_out_tensorlist_tensorlist_arrayrefscalar(out, self, exponent); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_reciprocal_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_reciprocal_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_reciprocal_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_round_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_round_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_round_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_sigmoid_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_sigmoid_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sigmoid_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sign_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_sign_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sign_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sin_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_sin_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sin_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sinh_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_sinh_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sinh_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sqrt_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_sqrt_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sqrt_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_tan_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_tan_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tan_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_tanh_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_tanh_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tanh_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_trunc_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_trunc_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_trunc_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* tensor1, void* tensor2, void* value);
- HOST_API void* lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(out, self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* tensor1, void* tensor2, void* value);
- HOST_API void* lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(out, self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_norm_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* ord);
- HOST_API void* lantern__foreach_norm_out_tensorlist_tensorlist_scalar(void* out, void* self, void* ord) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_norm_out_tensorlist_tensorlist_scalar(out, self, ord); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* tensors1, void* weights);
- HOST_API void* lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist(void* out, void* self, void* tensors1, void* weights) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist(out, self, tensors1, weights); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* tensors1, void* weight);
- HOST_API void* lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensors1, void* weight) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar(out, self, tensors1, weight); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_zero_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_zero_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_zero_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_zero_tensorlist)(void* self);
+ HOST_API void* lantern__foreach_zero_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_zero_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_copy_out_tensorlist_tensorlist_tensorlist_bool)(void* out, void* self, void* src, void* non_blocking);
+ HOST_API void* lantern__foreach_copy_out_tensorlist_tensorlist_tensorlist_bool(void* out, void* self, void* src, void* non_blocking) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_copy_out_tensorlist_tensorlist_tensorlist_bool(out, self, src, non_blocking); LANTERN_HOST_HANDLER return ret; }
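The hunks above regroup and extend the `_foreach_*` out-variants: each op applies across an entire list of tensors in one call, and the `arrayrefscalar` overloads take one scalar per list element. A conceptual sketch of the semantics follows; plain `std::vector`s stand in for the real `TensorList` type, and the actual kernels fuse this loop on device rather than iterating per tensor:

```cpp
#include <vector>

// Conceptual semantics of a _foreach "out" op with per-element scalars:
// out[i] = self[i] * scalars[i] for every tensor i in the list.
// Real _foreach kernels batch this to avoid per-tensor dispatch overhead.
void foreach_mul_out(std::vector<std::vector<float>>& out,
                     const std::vector<std::vector<float>>& self,
                     const std::vector<float>& scalars) {
  for (std::size_t i = 0; i < self.size(); ++i)
    for (std::size_t j = 0; j < self[i].size(); ++j)
      out[i][j] = self[i][j] * scalars[i];
}
```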
LANTERN_API void* (LANTERN_PTR _lantern_bucketize_out_tensor_scalar_tensor_bool_bool)(void* out, void* self, void* boundaries, void* out_int32, void* right);
HOST_API void* lantern_bucketize_out_tensor_scalar_tensor_bool_bool(void* out, void* self, void* boundaries, void* out_int32, void* right) { LANTERN_CHECK_LOADED void* ret = _lantern_bucketize_out_tensor_scalar_tensor_bool_bool(out, self, boundaries, out_int32, right); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor)(void* out, void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter);
- HOST_API void* lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor(void* out, void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter) { LANTERN_CHECK_LOADED void* ret = _lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor(out, sorted_sequence, self, out_int32, right, side, sorter); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_glu_jvp_out_tensor_tensor_tensor_tensor_intt)(void* out, void* glu, void* x, void* dx, void* dim);
HOST_API void* lantern_glu_jvp_out_tensor_tensor_tensor_tensor_intt(void* out, void* glu, void* x, void* dx, void* dim) { LANTERN_CHECK_LOADED void* ret = _lantern_glu_jvp_out_tensor_tensor_tensor_tensor_intt(out, glu, x, dx, dim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_glu_backward_jvp_out_tensor_tensor_tensor_tensor_tensor_tensor_intt)(void* out, void* grad_x, void* grad_glu, void* x, void* dgrad_glu, void* dx, void* dim);
@@ -9588,20 +9882,36 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern__triton_scaled_dot_attention_out_tensor_tensor_tensor_tensor_double(void* out, void* q, void* k, void* v, void* dropout_p) { LANTERN_CHECK_LOADED void* ret = _lantern__triton_scaled_dot_attention_out_tensor_tensor_tensor_tensor_double(out, q, k, v, dropout_p); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__triton_multi_head_attention_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor)(void* out, void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask);
HOST_API void* lantern__triton_multi_head_attention_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor(void* out, void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask) { LANTERN_CHECK_LOADED void* ret = _lantern__triton_multi_head_attention_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor(out, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__transformer_decoder_only_layer_fwd_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor)(void* out0, void* out1, void* out2, void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* incr_key, void* incr_value);
- HOST_API void* lantern__transformer_decoder_only_layer_fwd_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* out0, void* out1, void* out2, void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* incr_key, void* incr_value) { LANTERN_CHECK_LOADED void* ret = _lantern__transformer_decoder_only_layer_fwd_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(out0, out1, out2, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__native_decoder_only_multi_head_attention_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool)(void* out0, void* out1, void* out2, void* out3, void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask, void* incr_key, void* incr_value, void* need_weights, void* average_attn_weights);
- HOST_API void* lantern__native_decoder_only_multi_head_attention_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(void* out0, void* out1, void* out2, void* out3, void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask, void* incr_key, void* incr_value, void* need_weights, void* average_attn_weights) { LANTERN_CHECK_LOADED void* ret = _lantern__native_decoder_only_multi_head_attention_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(out0, out1, out2, out3, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foobar_out_tensor_tensor_bool_bool_bool)(void* out, void* self, void* arg1, void* arg2, void* arg3);
HOST_API void* lantern__foobar_out_tensor_tensor_bool_bool_bool(void* out, void* self, void* arg1, void* arg2, void* arg3) { LANTERN_CHECK_LOADED void* ret = _lantern__foobar_out_tensor_tensor_bool_bool_bool(out, self, arg1, arg2, arg3); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor)(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
HOST_API void* lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
HOST_API void* lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor)(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor)(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
HOST_API void* lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
HOST_API void* lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor)(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor)(void* out, void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(out, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor)(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor)(void* out, void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(out, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor)(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
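The added `_fused_adam`, `_fused_adamw`, and `_fused_sgd` entry points above differ from the existing ones only in the `lr` slot of the mangled name: `tensor` instead of `double`, so a learning rate that lives on the accelerator can feed the fused kernel without being read back to the host on every step. A toy illustration of that distinction follows; the single-element `Tensor` struct is purely illustrative, not the libtorch type:

```cpp
#include <vector>

// Why a tensor-typed lr overload exists: if a scheduler updates lr in place
// on the device, passing it as a plain double would force a device-to-host
// read (a sync point) each step. Host-only stand-in below.
struct Tensor { float value; };  // illustrative one-element "tensor"

void fused_sgd_step(std::vector<float>& param, const std::vector<float>& grad,
                    const Tensor& lr /* tensor overload: no scalar readback */) {
  for (std::size_t i = 0; i < param.size(); ++i)
    param[i] -= lr.value * grad[i];
}
```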
state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__fused_adagrad_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor)(void* self, void* grads, void* state_sums, void* state_steps, void* lr, void* lr_decay, void* weight_decay, void* eps, void* maximize, void* grad_scale, void* found_inf); + HOST_API void* lantern__fused_adagrad_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(void* self, void* grads, void* state_sums, void* state_steps, void* lr, void* lr_decay, void* weight_decay, void* eps, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adagrad_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; } /* Autogen Headers -- End */ #ifdef __cplusplus @@ -9711,8 +10021,12 @@ bool lanternLoadLibrary(const std::string &libPath, std::string *pError) } pLibrary = (void *)::LoadLibraryEx(libFile.c_str(), NULL, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS); +#elif defined(__APPLE__) || defined(__MACH__) + pLibrary = ::dlopen(libFile.c_str(), RTLD_NOW); #else - pLibrary = ::dlopen(libFile.c_str(), RTLD_NOW | RTLD_GLOBAL); + // On Linux use RTLD_DEEPBIND to avoid conflicts and make sure libtorch calls into + // the bundled MKL BLAS. + pLibrary = dlopen(libFile.c_str(), RTLD_NOW | RTLD_DEEPBIND); #endif if (pLibrary == NULL) { @@ -10261,7 +10575,17 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_Tensor_align_as_tensor_tensor) LOAD_SYMBOL(_lantern_align_tensors_tensorlist) LOAD_SYMBOL(_lantern__assert_async_tensor) + LOAD_SYMBOL(_lantern__assert_async_tensor_cstringview) + LOAD_SYMBOL(_lantern__assert_scalar_scalar_cstringview) + LOAD_SYMBOL(_lantern__functional_assert_scalar_scalar_cstringview_tensor) + LOAD_SYMBOL(_lantern__functional_assert_async_tensor_cstringview_tensor) LOAD_SYMBOL(_lantern__assert_tensor_metadata_tensor_intarrayref_intarrayref_scalartype) + LOAD_SYMBOL(_lantern__print_cstringview) + LOAD_SYMBOL(_lantern_sym_constrain_range_scalar_intt_intt) + LOAD_SYMBOL(_lantern_sym_constrain_range_for_size_scalar_intt_intt) + LOAD_SYMBOL(_lantern__functional_sym_constrain_range_scalar_intt_intt_tensor) + LOAD_SYMBOL(_lantern__functional_sym_constrain_range_for_size_scalar_intt_intt_tensor) + LOAD_SYMBOL(_lantern__make_dep_token_tensoroptions_memoryformat) LOAD_SYMBOL(_lantern_Tensor_refine_names_tensor_dimnamelist) LOAD_SYMBOL(_lantern__use_cudnn_ctc_loss_tensor_tensor_intarrayref_intarrayref_intt) LOAD_SYMBOL(_lantern__use_cudnn_ctc_loss_tensor_tensor_tensor_tensor_intt) @@ -10370,9 +10694,13 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__is_any_true_tensor) LOAD_SYMBOL(_lantern_Tensor__is_any_true_tensor) LOAD_SYMBOL(_lantern__test_check_tensor_tensor) + LOAD_SYMBOL(_lantern__test_functorch_fallback_tensor_tensor) LOAD_SYMBOL(_lantern_all_tensor_intt_bool) LOAD_SYMBOL(_lantern_Tensor_all_tensor_intt_bool) + LOAD_SYMBOL(_lantern_all_tensor_intarrayref_bool) + LOAD_SYMBOL(_lantern_Tensor_all_tensor_intarrayref_bool) LOAD_SYMBOL(_lantern_all_out_tensor_tensor_intt_bool) + LOAD_SYMBOL(_lantern_all_out_tensor_tensor_intarrayref_bool) LOAD_SYMBOL(_lantern_all_tensor_dimname_bool) LOAD_SYMBOL(_lantern_Tensor_all_tensor_dimname_bool) 
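The RTLD_DEEPBIND change above deserves a short explanation, since everything around it is autogenerated. With the old RTLD_NOW | RTLD_GLOBAL flags, a symbol that both libtorch and the host process define (BLAS/LAPACK routines are the usual culprits, because R links its own) resolves to whichever definition was loaded first. RTLD_DEEPBIND, a glibc extension, makes the newly loaded library prefer its own definitions and those of its dependencies, so libtorch calls the MKL it bundles. A minimal sketch of the pattern, assuming the library file is named liblantern.so; the real lanternLoadLibrary and the LOAD_SYMBOL entries below wrap the same dlopen/dlsym calls with error reporting and platform branches:

    // Sketch only: load a shared library so its internal calls bind to its own
    // symbols (RTLD_DEEPBIND), then resolve one exported function by name.
    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE // RTLD_DEEPBIND is a glibc extension
    #endif
    #include <dlfcn.h>
    #include <cstdio>

    int main() {
      // RTLD_NOW resolves all undefined symbols up front, failing fast;
      // RTLD_DEEPBIND searches the library and its dependencies (e.g. the
      // bundled MKL) before the global scope, so the host BLAS is not used.
      void* handle = dlopen("liblantern.so", RTLD_NOW | RTLD_DEEPBIND);
      if (handle == NULL) {
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
      }
      // Each exported function is then looked up by name, which is what the
      // LOAD_SYMBOL(...) entries in this file ultimately do via dlsym.
      void* fn = dlsym(handle, "_lantern_buffer_from_tensor");
      if (fn == NULL) {
        std::fprintf(stderr, "dlsym failed: %s\n", dlerror());
      }
      dlclose(handle);
      return 0;
    }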
@@ -10380,7 +10708,10 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_Tensor_allclose_tensor_tensor_double_double_bool)
 LOAD_SYMBOL(_lantern_any_tensor_intt_bool)
 LOAD_SYMBOL(_lantern_Tensor_any_tensor_intt_bool)
+ LOAD_SYMBOL(_lantern_any_tensor_intarrayref_bool)
+ LOAD_SYMBOL(_lantern_Tensor_any_tensor_intarrayref_bool)
 LOAD_SYMBOL(_lantern_any_out_tensor_tensor_intt_bool)
+ LOAD_SYMBOL(_lantern_any_out_tensor_tensor_intarrayref_bool)
 LOAD_SYMBOL(_lantern_any_tensor_dimname_bool)
 LOAD_SYMBOL(_lantern_Tensor_any_tensor_dimname_bool)
 LOAD_SYMBOL(_lantern_any_out_tensor_tensor_dimname_bool)
@@ -10493,6 +10824,8 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_Tensor_copysign_tensor_scalar)
 LOAD_SYMBOL(_lantern_Tensor_copysign__tensor_scalar)
 LOAD_SYMBOL(_lantern_copysign_out_tensor_tensor_scalar)
+ LOAD_SYMBOL(_lantern__lazy_clone_tensor)
+ LOAD_SYMBOL(_lantern_Tensor__lazy_clone_tensor)
 LOAD_SYMBOL(_lantern_logical_not_tensor)
 LOAD_SYMBOL(_lantern_Tensor_logical_not_tensor)
 LOAD_SYMBOL(_lantern_Tensor_logical_not__tensor)
@@ -10642,6 +10975,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_cudnn_batch_norm_tensor_tensor_tensor_tensor_tensor_bool_double_double)
 LOAD_SYMBOL(_lantern_cudnn_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_tensor)
 LOAD_SYMBOL(_lantern_cudnn_convolution_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)
+ LOAD_SYMBOL(_lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)
 LOAD_SYMBOL(_lantern_cudnn_convolution_transpose_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)
 LOAD_SYMBOL(_lantern__mps_convolution_transpose_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt)
 LOAD_SYMBOL(_lantern_mps_convolution_transpose_backward_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_stdarraybool)
@@ -10770,6 +11104,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern__embedding_bag_per_sample_weights_backward_tensor_tensor_tensor_tensor_tensor_intt_intt)
 LOAD_SYMBOL(_lantern_empty_intarrayref_dimnamelist_tensoroptions_memoryformat)
 LOAD_SYMBOL(_lantern_empty_intarrayref_tensoroptions_memoryformat)
+ LOAD_SYMBOL(_lantern_empty_permuted_intarrayref_intarrayref_tensoroptions)
 LOAD_SYMBOL(_lantern_Tensor_new_empty_tensor_intarrayref_tensoroptions)
 LOAD_SYMBOL(_lantern_Tensor_new_empty_strided_tensor_intarrayref_intarrayref_tensoroptions)
 LOAD_SYMBOL(_lantern_Tensor_new_full_tensor_intarrayref_scalar_tensoroptions)
@@ -10891,13 +11226,16 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern__fft_c2c_tensor_intarrayref_intt_bool)
 LOAD_SYMBOL(_lantern__fft_c2c_out_tensor_tensor_intarrayref_intt_bool)
 LOAD_SYMBOL(_lantern__validate_compressed_sparse_indices_bool_tensor_tensor_intt_intt_intt)
- LOAD_SYMBOL(_lantern__cufft_get_plan_cache_size_intt)
- LOAD_SYMBOL(_lantern__cufft_get_plan_cache_max_size_intt)
- LOAD_SYMBOL(_lantern__cufft_set_plan_cache_max_size_intt_intt)
- LOAD_SYMBOL(_lantern__cufft_clear_plan_cache_intt)
- LOAD_SYMBOL(_lantern_index_tensor_constclistcoptionaltensor)
- LOAD_SYMBOL(_lantern_Tensor_index_tensor_constclistcoptionaltensor)
- LOAD_SYMBOL(_lantern_index_out_tensor_tensor_constclistcoptionaltensor)
+ LOAD_SYMBOL(_lantern__cufft_get_plan_cache_size_deviceindex)
+ LOAD_SYMBOL(_lantern__cufft_get_plan_cache_max_size_deviceindex)
+ LOAD_SYMBOL(_lantern__cufft_set_plan_cache_max_size_deviceindex_intt)
+ LOAD_SYMBOL(_lantern__cufft_clear_plan_cache_deviceindex)
+ LOAD_SYMBOL(_lantern_index_tensor_constcliststdoptionaltensor)
+ LOAD_SYMBOL(_lantern_Tensor_index_tensor_constcliststdoptionaltensor)
+ LOAD_SYMBOL(_lantern_index_out_tensor_tensor_constcliststdoptionaltensor)
+ LOAD_SYMBOL(_lantern__unsafe_index_tensor_constcliststdoptionaltensor)
+ LOAD_SYMBOL(_lantern__unsafe_masked_index_tensor_tensor_constcliststdoptionaltensor_scalar)
+ LOAD_SYMBOL(_lantern__unsafe_masked_index_put_accumulate_tensor_tensor_constcliststdoptionaltensor_tensor)
 LOAD_SYMBOL(_lantern_index_copy_out_tensor_tensor_intt_tensor_tensor)
 LOAD_SYMBOL(_lantern_Tensor_index_copy__tensor_intt_tensor_tensor)
 LOAD_SYMBOL(_lantern_index_copy_tensor_intt_tensor_tensor)
@@ -10905,11 +11243,12 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_Tensor_index_copy__tensor_dimname_tensor_tensor)
 LOAD_SYMBOL(_lantern_index_copy_tensor_dimname_tensor_tensor)
 LOAD_SYMBOL(_lantern_Tensor_index_copy_tensor_dimname_tensor_tensor)
- LOAD_SYMBOL(_lantern_index_put__tensor_constclistcoptionaltensor_tensor_bool)
- LOAD_SYMBOL(_lantern_Tensor_index_put__tensor_constclistcoptionaltensor_tensor_bool)
- LOAD_SYMBOL(_lantern_index_put_tensor_constclistcoptionaltensor_tensor_bool)
- LOAD_SYMBOL(_lantern_Tensor_index_put_tensor_constclistcoptionaltensor_tensor_bool)
- LOAD_SYMBOL(_lantern__index_put_impl__tensor_constclistcoptionaltensor_tensor_bool_bool)
+ LOAD_SYMBOL(_lantern_index_put__tensor_constcliststdoptionaltensor_tensor_bool)
+ LOAD_SYMBOL(_lantern_Tensor_index_put__tensor_constcliststdoptionaltensor_tensor_bool)
+ LOAD_SYMBOL(_lantern_index_put_tensor_constcliststdoptionaltensor_tensor_bool)
+ LOAD_SYMBOL(_lantern_Tensor_index_put_tensor_constcliststdoptionaltensor_tensor_bool)
+ LOAD_SYMBOL(_lantern__unsafe_index_put_tensor_constcliststdoptionaltensor_tensor_bool)
+ LOAD_SYMBOL(_lantern__index_put_impl__tensor_constcliststdoptionaltensor_tensor_bool_bool)
 LOAD_SYMBOL(_lantern_instance_norm_tensor_tensor_tensor_tensor_tensor_bool_double_double_bool)
 LOAD_SYMBOL(_lantern_isclose_tensor_tensor_double_double_bool)
 LOAD_SYMBOL(_lantern_Tensor_isclose_tensor_tensor_double_double_bool)
@@ -10956,6 +11295,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_layer_norm_tensor_intarrayref_tensor_tensor_double_bool)
 LOAD_SYMBOL(_lantern_native_layer_norm_tensor_intarrayref_tensor_tensor_double)
 LOAD_SYMBOL(_lantern_native_layer_norm_backward_tensor_tensor_intarrayref_tensor_tensor_tensor_tensor_stdarraybool)
+ LOAD_SYMBOL(_lantern_rms_norm_tensor_intarrayref_tensor_double)
 LOAD_SYMBOL(_lantern_nan_to_num_tensor_double_double_double)
 LOAD_SYMBOL(_lantern_Tensor_nan_to_num_tensor_double_double_double)
 LOAD_SYMBOL(_lantern_nan_to_num__tensor_double_double_double)
@@ -10968,10 +11308,22 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_mkldnn_linear_backward_input_intarrayref_tensor_tensor)
 LOAD_SYMBOL(_lantern_mkldnn_linear_backward_weights_tensor_tensor_tensor_bool)
 LOAD_SYMBOL(_lantern_mkldnn_linear_backward_tensor_tensor_tensor_stdarraybool)
+ LOAD_SYMBOL(_lantern__cslt_compress_tensor)
+ LOAD_SYMBOL(_lantern__cslt_sparse_mm_tensor_tensor_tensor_tensor_scalartype_bool_intt)
+ LOAD_SYMBOL(_lantern__cslt_sparse_mm_search_tensor_tensor_tensor_tensor_scalartype_bool)
+ LOAD_SYMBOL(_lantern__sparse_semi_structured_tile_tensor_cstringview_bool)
+ LOAD_SYMBOL(_lantern__sparse_semi_structured_apply_tensor_tensor)
+ LOAD_SYMBOL(_lantern__sparse_semi_structured_apply_dense_tensor_tensor)
+ LOAD_SYMBOL(_lantern__sparse_semi_structured_linear_tensor_tensor_tensor_tensor_cstringview_scalartype)
+ LOAD_SYMBOL(_lantern__sparse_semi_structured_mm_tensor_tensor_tensor_scalartype)
+ LOAD_SYMBOL(_lantern__sparse_semi_structured_addmm_tensor_tensor_tensor_tensor_scalar_scalar_scalartype)
+ LOAD_SYMBOL(_lantern__mixed_dtypes_linear_tensor_tensor_tensor_tensor_cstringview)
 LOAD_SYMBOL(_lantern_fbgemm_linear_int8_weight_fp32_activation_tensor_tensor_tensor_tensor_scalar_scalar_tensor)
 LOAD_SYMBOL(_lantern_fbgemm_linear_int8_weight_tensor_tensor_tensor_tensor_scalar_scalar_tensor)
 LOAD_SYMBOL(_lantern_fbgemm_linear_quantize_weight_tensor)
 LOAD_SYMBOL(_lantern_fbgemm_pack_gemm_matrix_fp16_tensor)
+ LOAD_SYMBOL(_lantern__wrapped_linear_prepack_tensor_tensor_tensor_tensor)
+ LOAD_SYMBOL(_lantern__wrapped_quantized_linear_prepacked_tensor_tensor_tensor_tensor_tensor_tensor_intt)
 LOAD_SYMBOL(_lantern_fbgemm_linear_fp16_weight_fp32_activation_tensor_tensor_tensor)
 LOAD_SYMBOL(_lantern_fbgemm_linear_fp16_weight_tensor_tensor_tensor)
 LOAD_SYMBOL(_lantern_fbgemm_pack_quantized_matrix_tensor)
@@ -10982,7 +11334,13 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_Tensor_ldexp__tensor_tensor)
 LOAD_SYMBOL(_lantern_ldexp_out_tensor_tensor_tensor)
 LOAD_SYMBOL(_lantern_linspace_scalar_scalar_intt_tensoroptions)
+ LOAD_SYMBOL(_lantern_linspace_tensor_tensor_intt_tensoroptions)
+ LOAD_SYMBOL(_lantern_linspace_tensor_scalar_intt_tensoroptions)
+ LOAD_SYMBOL(_lantern_linspace_scalar_tensor_intt_tensoroptions)
 LOAD_SYMBOL(_lantern_linspace_out_tensor_scalar_scalar_intt)
+ LOAD_SYMBOL(_lantern_linspace_out_tensor_tensor_tensor_intt)
+ LOAD_SYMBOL(_lantern_linspace_out_tensor_tensor_scalar_intt)
+ LOAD_SYMBOL(_lantern_linspace_out_tensor_scalar_tensor_intt)
 LOAD_SYMBOL(_lantern_log_tensor)
 LOAD_SYMBOL(_lantern_Tensor_log_tensor)
 LOAD_SYMBOL(_lantern_log__tensor)
@@ -11022,7 +11380,13 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_xlogy_out_tensor_scalar_tensor)
 LOAD_SYMBOL(_lantern_xlogy_out_tensor_tensor_scalar)
 LOAD_SYMBOL(_lantern_logspace_scalar_scalar_intt_double_tensoroptions)
+ LOAD_SYMBOL(_lantern_logspace_tensor_tensor_intt_double_tensoroptions)
+ LOAD_SYMBOL(_lantern_logspace_tensor_scalar_intt_double_tensoroptions)
+ LOAD_SYMBOL(_lantern_logspace_scalar_tensor_intt_double_tensoroptions)
 LOAD_SYMBOL(_lantern_logspace_out_tensor_scalar_scalar_intt_double)
+ LOAD_SYMBOL(_lantern_logspace_out_tensor_tensor_tensor_intt_double)
+ LOAD_SYMBOL(_lantern_logspace_out_tensor_tensor_scalar_intt_double)
+ LOAD_SYMBOL(_lantern_logspace_out_tensor_scalar_tensor_intt_double)
 LOAD_SYMBOL(_lantern_log_softmax_tensor_intt_scalartype)
 LOAD_SYMBOL(_lantern_Tensor_log_softmax_tensor_intt_scalartype)
 LOAD_SYMBOL(_lantern_log_softmax_out_tensor_tensor_intt_scalartype)
@@ -11084,9 +11448,11 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_mkldnn_max_pool3d_backward_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)
 LOAD_SYMBOL(_lantern_quantized_max_pool1d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)
 LOAD_SYMBOL(_lantern_quantized_max_pool2d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)
+ LOAD_SYMBOL(_lantern_quantized_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)
 LOAD_SYMBOL(_lantern_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)
 LOAD_SYMBOL(_lantern_mean_tensor_scalartype)
 LOAD_SYMBOL(_lantern_Tensor_mean_tensor_scalartype)
+ LOAD_SYMBOL(_lantern_mean_out_tensor_tensor_scalartype)
 LOAD_SYMBOL(_lantern_mean_tensor_intarrayref_bool_scalartype)
 LOAD_SYMBOL(_lantern_Tensor_mean_tensor_intarrayref_bool_scalartype)
 LOAD_SYMBOL(_lantern_mean_out_tensor_tensor_intarrayref_bool_scalartype)
@@ -11138,6 +11504,11 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_mm_tensor_tensor)
 LOAD_SYMBOL(_lantern_Tensor_mm_tensor_tensor)
 LOAD_SYMBOL(_lantern_mm_out_tensor_tensor_tensor)
+ LOAD_SYMBOL(_lantern__int_mm_tensor_tensor)
+ LOAD_SYMBOL(_lantern__int_mm_out_tensor_tensor_tensor)
+ LOAD_SYMBOL(_lantern__convert_weight_to_int4pack_tensor_intt)
+ LOAD_SYMBOL(_lantern__weight_int4pack_mm_tensor_tensor_intt_tensor)
+ LOAD_SYMBOL(_lantern__weight_int8pack_mm_tensor_tensor_tensor)
 LOAD_SYMBOL(_lantern__sparse_mm_tensor_tensor)
 LOAD_SYMBOL(_lantern__sparse_mm_tensor_tensor_cstringview)
 LOAD_SYMBOL(_lantern__sparse_sparse_matmul_tensor_tensor)
@@ -11178,6 +11549,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_native_batch_norm_tensor_tensor_tensor_tensor_tensor_bool_double_double)
 LOAD_SYMBOL(_lantern_native_batch_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double)
 LOAD_SYMBOL(_lantern__native_batch_norm_legit_tensor_tensor_tensor_tensor_tensor_bool_double_double)
+ LOAD_SYMBOL(_lantern__native_batch_norm_legit_no_training_tensor_tensor_tensor_tensor_tensor_double_double)
 LOAD_SYMBOL(_lantern__native_batch_norm_legit_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double)
 LOAD_SYMBOL(_lantern__native_batch_norm_legit_tensor_tensor_tensor_bool_double_double)
 LOAD_SYMBOL(_lantern__native_batch_norm_legit_out_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double)
@@ -11398,6 +11770,8 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_slice_tensor_intt_intt_intt_intt)
 LOAD_SYMBOL(_lantern_Tensor_slice_tensor_intt_intt_intt_intt)
 LOAD_SYMBOL(_lantern_slice_backward_tensor_intarrayref_intt_intt_intt_intt)
+ LOAD_SYMBOL(_lantern_slice_inverse_tensor_tensor_intt_intt_intt_intt)
+ LOAD_SYMBOL(_lantern_Tensor_slice_inverse_tensor_tensor_intt_intt_intt_intt)
 LOAD_SYMBOL(_lantern_slice_scatter_tensor_tensor_intt_intt_intt_intt)
 LOAD_SYMBOL(_lantern_Tensor_slice_scatter_tensor_tensor_intt_intt_intt_intt)
 LOAD_SYMBOL(_lantern_select_scatter_tensor_tensor_intt_intt)
@@ -11454,6 +11828,8 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_sspaddmm_tensor_tensor_tensor_scalar_scalar)
 LOAD_SYMBOL(_lantern_Tensor_sspaddmm_tensor_tensor_tensor_scalar_scalar)
 LOAD_SYMBOL(_lantern_sspaddmm_out_tensor_tensor_tensor_tensor_scalar_scalar)
+ LOAD_SYMBOL(_lantern__chunk_cat_tensorlist_intt_intt)
+ LOAD_SYMBOL(_lantern__chunk_cat_out_tensor_tensorlist_intt_intt)
 LOAD_SYMBOL(_lantern_stack_tensorlist_intt)
 LOAD_SYMBOL(_lantern_stack_out_tensor_tensorlist_intt)
 LOAD_SYMBOL(_lantern__stack_tensorlist_intt)
@@ -11501,21 +11877,21 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_Tensor_std_tensor_bool)
 LOAD_SYMBOL(_lantern_std_tensor_intarrayref_bool_bool)
 LOAD_SYMBOL(_lantern_Tensor_std_tensor_intarrayref_bool_bool)
- LOAD_SYMBOL(_lantern_std_tensor_intarrayref_intt_bool)
- LOAD_SYMBOL(_lantern_Tensor_std_tensor_intarrayref_intt_bool)
+ LOAD_SYMBOL(_lantern_std_tensor_intarrayref_scalar_bool)
+ LOAD_SYMBOL(_lantern_Tensor_std_tensor_intarrayref_scalar_bool)
 LOAD_SYMBOL(_lantern_std_mean_tensor_bool)
 LOAD_SYMBOL(_lantern_std_mean_tensor_intarrayref_bool_bool)
- LOAD_SYMBOL(_lantern_std_mean_tensor_intarrayref_intt_bool)
+ LOAD_SYMBOL(_lantern_std_mean_tensor_intarrayref_scalar_bool)
 LOAD_SYMBOL(_lantern_std_mean_tensor_dimnamelist_bool_bool)
- LOAD_SYMBOL(_lantern_std_mean_tensor_dimnamelist_intt_bool)
+ LOAD_SYMBOL(_lantern_std_mean_tensor_dimnamelist_scalar_bool)
 LOAD_SYMBOL(_lantern_std_out_tensor_tensor_intarrayref_bool_bool)
- LOAD_SYMBOL(_lantern_std_out_tensor_tensor_intarrayref_intt_bool)
+ LOAD_SYMBOL(_lantern_std_out_tensor_tensor_intarrayref_scalar_bool)
 LOAD_SYMBOL(_lantern_std_tensor_dimnamelist_bool_bool)
 LOAD_SYMBOL(_lantern_Tensor_std_tensor_dimnamelist_bool_bool)
 LOAD_SYMBOL(_lantern_std_out_tensor_tensor_dimnamelist_bool_bool)
- LOAD_SYMBOL(_lantern_std_tensor_dimnamelist_intt_bool)
- LOAD_SYMBOL(_lantern_Tensor_std_tensor_dimnamelist_intt_bool)
- LOAD_SYMBOL(_lantern_std_out_tensor_tensor_dimnamelist_intt_bool)
+ LOAD_SYMBOL(_lantern_std_tensor_dimnamelist_scalar_bool)
+ LOAD_SYMBOL(_lantern_Tensor_std_tensor_dimnamelist_scalar_bool)
+ LOAD_SYMBOL(_lantern_std_out_tensor_tensor_dimnamelist_scalar_bool)
 LOAD_SYMBOL(_lantern_prod_tensor_scalartype)
 LOAD_SYMBOL(_lantern_Tensor_prod_tensor_scalartype)
 LOAD_SYMBOL(_lantern_prod_tensor_intt_bool_scalartype)
@@ -11574,10 +11950,21 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern__nested_from_padded_tensor_tensor_bool)
 LOAD_SYMBOL(_lantern_Tensor__nested_tensor_size_tensor)
 LOAD_SYMBOL(_lantern_Tensor__nested_tensor_strides_tensor)
- LOAD_SYMBOL(_lantern_Tensor__nested_tensor_offsets_tensor)
+ LOAD_SYMBOL(_lantern_Tensor__nested_tensor_storage_offsets_tensor)
 LOAD_SYMBOL(_lantern__nested_from_padded_and_nested_example_tensor_tensor)
- LOAD_SYMBOL(_lantern__nested_view_from_buffer_tensor_tensor_tensor_intarrayref)
- LOAD_SYMBOL(_lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_intarrayref)
+ LOAD_SYMBOL(_lantern__nested_view_from_buffer_tensor_tensor_tensor_tensor)
+ LOAD_SYMBOL(_lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_tensor)
+ LOAD_SYMBOL(_lantern__nested_view_from_jagged_tensor_tensor_tensor_tensor_intt_tensor_tensor)
+ LOAD_SYMBOL(_lantern__nested_view_from_jagged_copy_tensor_tensor_tensor_tensor_intt_tensor_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_values_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_values_copy_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_offsets_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_lengths_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_ragged_idx_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_min_seqlen_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_max_seqlen_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_jagged_dummy_tensor)
+ LOAD_SYMBOL(_lantern__nested_compute_contiguous_strides_offsets_tensor)
 LOAD_SYMBOL(_lantern__trilinear_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt)
 LOAD_SYMBOL(_lantern_triplet_margin_loss_tensor_tensor_tensor_double_double_double_bool_intt)
 LOAD_SYMBOL(_lantern_trunc_tensor)
@@ -11606,21 +11993,21 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_Tensor_var_tensor_bool)
 LOAD_SYMBOL(_lantern_var_tensor_intarrayref_bool_bool)
 LOAD_SYMBOL(_lantern_Tensor_var_tensor_intarrayref_bool_bool)
- LOAD_SYMBOL(_lantern_var_tensor_intarrayref_intt_bool)
- LOAD_SYMBOL(_lantern_Tensor_var_tensor_intarrayref_intt_bool)
+ LOAD_SYMBOL(_lantern_var_tensor_intarrayref_scalar_bool)
+ LOAD_SYMBOL(_lantern_Tensor_var_tensor_intarrayref_scalar_bool)
 LOAD_SYMBOL(_lantern_var_out_tensor_tensor_intarrayref_bool_bool)
- LOAD_SYMBOL(_lantern_var_out_tensor_tensor_intarrayref_intt_bool)
+ LOAD_SYMBOL(_lantern_var_out_tensor_tensor_intarrayref_scalar_bool)
 LOAD_SYMBOL(_lantern_var_tensor_dimnamelist_bool_bool)
 LOAD_SYMBOL(_lantern_Tensor_var_tensor_dimnamelist_bool_bool)
 LOAD_SYMBOL(_lantern_var_out_tensor_tensor_dimnamelist_bool_bool)
- LOAD_SYMBOL(_lantern_var_tensor_dimnamelist_intt_bool)
- LOAD_SYMBOL(_lantern_Tensor_var_tensor_dimnamelist_intt_bool)
- LOAD_SYMBOL(_lantern_var_out_tensor_tensor_dimnamelist_intt_bool)
+ LOAD_SYMBOL(_lantern_var_tensor_dimnamelist_scalar_bool)
+ LOAD_SYMBOL(_lantern_Tensor_var_tensor_dimnamelist_scalar_bool)
+ LOAD_SYMBOL(_lantern_var_out_tensor_tensor_dimnamelist_scalar_bool)
 LOAD_SYMBOL(_lantern_var_mean_tensor_bool)
 LOAD_SYMBOL(_lantern_var_mean_tensor_intarrayref_bool_bool)
- LOAD_SYMBOL(_lantern_var_mean_tensor_intarrayref_intt_bool)
+ LOAD_SYMBOL(_lantern_var_mean_tensor_intarrayref_scalar_bool)
 LOAD_SYMBOL(_lantern_var_mean_tensor_dimnamelist_bool_bool)
- LOAD_SYMBOL(_lantern_var_mean_tensor_dimnamelist_intt_bool)
+ LOAD_SYMBOL(_lantern_var_mean_tensor_dimnamelist_scalar_bool)
 LOAD_SYMBOL(_lantern_Tensor_view_as_tensor_tensor)
 LOAD_SYMBOL(_lantern_where_tensor_tensor_tensor)
 LOAD_SYMBOL(_lantern_Tensor_where_tensor_tensor_tensor)
@@ -11648,6 +12035,10 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_binomial_tensor_tensor_generator)
 LOAD_SYMBOL(_lantern_native_norm_tensor_scalar)
 LOAD_SYMBOL(_lantern_native_norm_tensor_scalar_intarrayref_bool_scalartype)
+ LOAD_SYMBOL(_lantern__batch_norm_with_update_tensor_tensor_tensor_tensor_tensor_double_double)
+ LOAD_SYMBOL(_lantern__batch_norm_with_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double)
+ LOAD_SYMBOL(_lantern__batch_norm_no_update_tensor_tensor_tensor_tensor_tensor_double_double)
+ LOAD_SYMBOL(_lantern_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool_tensor)
 LOAD_SYMBOL(_lantern__sparse_sum_tensor)
 LOAD_SYMBOL(_lantern__sparse_sum_tensor_scalartype)
 LOAD_SYMBOL(_lantern__sparse_sum_tensor_intarrayref)
@@ -11731,6 +12122,9 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern__addmm_activation_out_tensor_tensor_tensor_tensor_scalar_scalar_bool)
 LOAD_SYMBOL(_lantern__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool)
 LOAD_SYMBOL(_lantern_Tensor__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool)
+ LOAD_SYMBOL(_lantern__scaled_mm_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool)
+ LOAD_SYMBOL(_lantern__scaled_mm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool)
+ LOAD_SYMBOL(_lantern__sparse_compressed_tensor_with_dims_intt_intt_intarrayref_intarrayref_scalartype_tensoroptions)
 LOAD_SYMBOL(_lantern_sparse_compressed_tensor_tensor_tensor_tensor_intarrayref_tensoroptions)
 LOAD_SYMBOL(_lantern_sparse_csr_tensor_tensor_tensor_tensor_intarrayref_tensoroptions)
 LOAD_SYMBOL(_lantern_sparse_csc_tensor_tensor_tensor_tensor_intarrayref_tensoroptions)
@@ -11747,24 +12141,25 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern__sparse_bsr_tensor_unsafe_tensor_tensor_tensor_intarrayref_tensoroptions)
 LOAD_SYMBOL(_lantern__sparse_bsc_tensor_unsafe_tensor_tensor_tensor_intarrayref_tensoroptions)
 LOAD_SYMBOL(_lantern_sparse_coo_tensor_intarrayref_tensoroptions)
- LOAD_SYMBOL(_lantern_sparse_coo_tensor_tensor_tensor_tensoroptions)
- LOAD_SYMBOL(_lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions)
- LOAD_SYMBOL(_lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions)
- LOAD_SYMBOL(_lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref)
+ LOAD_SYMBOL(_lantern_sparse_coo_tensor_tensor_tensor_tensoroptions_bool)
+ LOAD_SYMBOL(_lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions_bool)
+ LOAD_SYMBOL(_lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions_bool)
+ LOAD_SYMBOL(_lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref_bool)
 LOAD_SYMBOL(_lantern__validate_sparse_compressed_tensor_args_tensor_tensor_tensor_intarrayref_layout)
 LOAD_SYMBOL(_lantern__validate_sparse_csr_tensor_args_tensor_tensor_tensor_intarrayref)
 LOAD_SYMBOL(_lantern__validate_sparse_csc_tensor_args_tensor_tensor_tensor_intarrayref)
 LOAD_SYMBOL(_lantern__validate_sparse_bsr_tensor_args_tensor_tensor_tensor_intarrayref)
 LOAD_SYMBOL(_lantern__validate_sparse_bsc_tensor_args_tensor_tensor_tensor_intarrayref)
 LOAD_SYMBOL(_lantern__sparse_coo_tensor_with_dims_intt_intt_intarrayref_tensoroptions)
- LOAD_SYMBOL(_lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions)
+ LOAD_SYMBOL(_lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions_bool)
 LOAD_SYMBOL(_lantern_Tensor_sparse_resize__tensor_intarrayref_intt_intt)
 LOAD_SYMBOL(_lantern_Tensor_sparse_resize_and_clear__tensor_intarrayref_intt_intt)
 LOAD_SYMBOL(_lantern_Tensor_sparse_mask_tensor_tensor)
+ LOAD_SYMBOL(_lantern_Tensor__sparse_mask_projection_tensor_tensor_bool)
 LOAD_SYMBOL(_lantern__to_cpu_tensorlist)
- LOAD_SYMBOL(_lantern_Tensor_to_dense_tensor_scalartype)
- LOAD_SYMBOL(_lantern_Tensor__to_dense_tensor_scalartype)
- LOAD_SYMBOL(_lantern_to_dense_backward_tensor_tensor)
+ LOAD_SYMBOL(_lantern_Tensor_to_dense_tensor_scalartype_bool)
+ LOAD_SYMBOL(_lantern_Tensor__to_dense_tensor_scalartype_bool)
+ LOAD_SYMBOL(_lantern_to_dense_backward_tensor_tensor_bool)
 LOAD_SYMBOL(_lantern_Tensor_sparse_dim_tensor)
 LOAD_SYMBOL(_lantern_Tensor__dimi_tensor)
 LOAD_SYMBOL(_lantern_Tensor_dense_dim_tensor)
@@ -11790,14 +12185,21 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_unbind_tensor_dimname)
 LOAD_SYMBOL(_lantern_Tensor_unbind_tensor_dimname)
 LOAD_SYMBOL(_lantern_Tensor_to_sparse_tensor_intt)
+ LOAD_SYMBOL(_lantern_Tensor__to_sparse_tensor_intt)
 LOAD_SYMBOL(_lantern_Tensor_to_sparse_tensor_layout_intarrayref_intt)
+ LOAD_SYMBOL(_lantern_Tensor__to_sparse_tensor_layout_intarrayref_intt)
 LOAD_SYMBOL(_lantern_Tensor_to_sparse_csr_tensor_intt)
+ LOAD_SYMBOL(_lantern_Tensor__to_sparse_csr_tensor_intt)
 LOAD_SYMBOL(_lantern_Tensor_to_sparse_csc_tensor_intt)
+ LOAD_SYMBOL(_lantern_Tensor__to_sparse_csc_tensor_intt)
 LOAD_SYMBOL(_lantern_Tensor_to_sparse_bsr_tensor_intarrayref_intt)
+ LOAD_SYMBOL(_lantern_Tensor__to_sparse_bsr_tensor_intarrayref_intt)
 LOAD_SYMBOL(_lantern_Tensor_to_sparse_bsc_tensor_intarrayref_intt)
+ LOAD_SYMBOL(_lantern_Tensor__to_sparse_bsc_tensor_intarrayref_intt)
+ LOAD_SYMBOL(_lantern__to_sparse_semi_structured_tensor)
 LOAD_SYMBOL(_lantern_Tensor_to_mkldnn_tensor_scalartype)
 LOAD_SYMBOL(_lantern_mkldnn_reorder_conv2d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref)
- LOAD_SYMBOL(_lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt)
+ LOAD_SYMBOL(_lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref)
 LOAD_SYMBOL(_lantern_to_mkldnn_backward_tensor_tensor)
 LOAD_SYMBOL(_lantern_quantize_per_tensor_dynamic_tensor_scalartype_bool)
 LOAD_SYMBOL(_lantern_quantize_per_tensor_tensor_double_intt_scalartype)
@@ -11904,6 +12306,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_Tensor_masked_scatter__tensor_tensor_tensor)
 LOAD_SYMBOL(_lantern_masked_scatter_tensor_tensor_tensor)
 LOAD_SYMBOL(_lantern_Tensor_masked_scatter_tensor_tensor_tensor)
+ LOAD_SYMBOL(_lantern_masked_scatter_backward_tensor_tensor_intarrayref)
 LOAD_SYMBOL(_lantern__masked_softmax_tensor_tensor_intt_intt)
 LOAD_SYMBOL(_lantern__masked_softmax_backward_tensor_tensor_tensor_intt)
 LOAD_SYMBOL(_lantern_Tensor_view_tensor_intarrayref)
@@ -12180,6 +12583,9 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_nonzero_out_tensor_tensor)
 LOAD_SYMBOL(_lantern_nonzero_tensor)
 LOAD_SYMBOL(_lantern_Tensor_nonzero_tensor)
+ LOAD_SYMBOL(_lantern_nonzero_static_out_tensor_tensor_intt_intt)
+ LOAD_SYMBOL(_lantern_nonzero_static_tensor_intt_intt)
+ LOAD_SYMBOL(_lantern_Tensor_nonzero_static_tensor_intt_intt)
 LOAD_SYMBOL(_lantern_nonzero_numpy_tensor)
 LOAD_SYMBOL(_lantern_Tensor_nonzero_numpy_tensor)
 LOAD_SYMBOL(_lantern_argwhere_tensor)
@@ -12339,6 +12745,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_remainder_scalar_tensor)
 LOAD_SYMBOL(_lantern_min_tensor)
 LOAD_SYMBOL(_lantern_Tensor_min_tensor)
+ LOAD_SYMBOL(_lantern_min_out_tensor_tensor)
 LOAD_SYMBOL(_lantern_fmin_tensor_tensor)
 LOAD_SYMBOL(_lantern_Tensor_fmin_tensor_tensor)
 LOAD_SYMBOL(_lantern_fmin_out_tensor_tensor_tensor)
@@ -12391,6 +12798,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_Tensor_argsort_tensor_intt_bool)
 LOAD_SYMBOL(_lantern_argsort_tensor_bool_intt_bool)
 LOAD_SYMBOL(_lantern_Tensor_argsort_tensor_bool_intt_bool)
+ LOAD_SYMBOL(_lantern_argsort_out_tensor_tensor_bool_intt_bool)
 LOAD_SYMBOL(_lantern_argsort_tensor_dimname_bool)
 LOAD_SYMBOL(_lantern_Tensor_argsort_tensor_dimname_bool)
 LOAD_SYMBOL(_lantern_topk_out_tensor_tensor_tensor_intt_intt_bool_bool)
@@ -12442,57 +12850,70 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern__amp_update_scale__tensor_tensor_tensor_double_double_intt)
 LOAD_SYMBOL(_lantern__foreach_add_tensorlist_scalar)
 LOAD_SYMBOL(_lantern__foreach_add__tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_sub_tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_sub__tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_mul_tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_mul__tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_div_tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_div__tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_clamp_min_tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_clamp_min__tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_clamp_max_tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_clamp_max__tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_maximum_tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_maximum__tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_minimum_tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_minimum__tensorlist_scalar)
 LOAD_SYMBOL(_lantern__foreach_add_tensorlist_tensorlist_scalar)
 LOAD_SYMBOL(_lantern__foreach_add__tensorlist_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_add_tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_add__tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_add_tensorlist_tensor_scalar)
+ LOAD_SYMBOL(_lantern__foreach_add__tensorlist_tensor_scalar)
+ LOAD_SYMBOL(_lantern__foreach_sub_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_sub__tensorlist_scalar)
 LOAD_SYMBOL(_lantern__foreach_sub_tensorlist_tensorlist_scalar)
 LOAD_SYMBOL(_lantern__foreach_sub__tensorlist_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_sub_tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_sub__tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_mul_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_mul__tensorlist_scalar)
 LOAD_SYMBOL(_lantern__foreach_mul_tensorlist_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_mul__tensorlist_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_mul_tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_mul__tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_mul_tensorlist_tensor)
+ LOAD_SYMBOL(_lantern__foreach_mul__tensorlist_tensor)
+ LOAD_SYMBOL(_lantern__foreach_div_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_div__tensorlist_scalar)
 LOAD_SYMBOL(_lantern__foreach_div_tensorlist_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_div__tensorlist_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_clamp_min_tensorlist_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_clamp_min__tensorlist_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_clamp_max_tensorlist_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_clamp_max__tensorlist_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_maximum_tensorlist_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_maximum__tensorlist_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_minimum_tensorlist_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_minimum__tensorlist_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_add_tensorlist_arrayrefscalar)
- LOAD_SYMBOL(_lantern__foreach_add__tensorlist_arrayrefscalar)
- LOAD_SYMBOL(_lantern__foreach_sub_tensorlist_arrayrefscalar)
- LOAD_SYMBOL(_lantern__foreach_sub__tensorlist_arrayrefscalar)
 LOAD_SYMBOL(_lantern__foreach_div_tensorlist_arrayrefscalar)
 LOAD_SYMBOL(_lantern__foreach_div__tensorlist_arrayrefscalar)
- LOAD_SYMBOL(_lantern__foreach_mul_tensorlist_arrayrefscalar)
- LOAD_SYMBOL(_lantern__foreach_mul__tensorlist_arrayrefscalar)
- LOAD_SYMBOL(_lantern__foreach_clamp_min_tensorlist_arrayrefscalar)
- LOAD_SYMBOL(_lantern__foreach_clamp_min__tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_div_tensorlist_tensor)
+ LOAD_SYMBOL(_lantern__foreach_div__tensorlist_tensor)
+ LOAD_SYMBOL(_lantern__foreach_clamp_max_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_clamp_max__tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_clamp_max_tensorlist_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_clamp_max__tensorlist_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_clamp_max_tensorlist_arrayrefscalar)
 LOAD_SYMBOL(_lantern__foreach_clamp_max__tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_clamp_min_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_clamp_min__tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_clamp_min_tensorlist_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_clamp_min__tensorlist_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_clamp_min_tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_clamp_min__tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_maximum_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_maximum__tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_maximum_tensorlist_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_maximum__tensorlist_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_maximum_tensorlist_arrayrefscalar)
 LOAD_SYMBOL(_lantern__foreach_maximum__tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_minimum_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_minimum__tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_minimum_tensorlist_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_minimum__tensorlist_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_minimum_tensorlist_arrayrefscalar)
 LOAD_SYMBOL(_lantern__foreach_minimum__tensorlist_arrayrefscalar)
- LOAD_SYMBOL(_lantern__foreach_exp_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_zero__tensorlist)
- LOAD_SYMBOL(_lantern__foreach_exp__tensorlist)
- LOAD_SYMBOL(_lantern__foreach_sqrt_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_sqrt__tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor)
+ LOAD_SYMBOL(_lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor)
+ LOAD_SYMBOL(_lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor)
+ LOAD_SYMBOL(_lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor)
 LOAD_SYMBOL(_lantern__foreach_abs_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_abs__tensorlist)
 LOAD_SYMBOL(_lantern__foreach_acos_tensorlist)
@@ -12511,10 +12932,20 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern__foreach_erf__tensorlist)
 LOAD_SYMBOL(_lantern__foreach_erfc_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_erfc__tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_exp_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_exp__tensorlist)
 LOAD_SYMBOL(_lantern__foreach_expm1_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_expm1__tensorlist)
 LOAD_SYMBOL(_lantern__foreach_floor_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_floor__tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_frac_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_frac__tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_lerp_tensorlist_tensorlist_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_lerp__tensorlist_tensorlist_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_lerp_tensorlist_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_lerp__tensorlist_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_lgamma_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_lgamma__tensorlist)
 LOAD_SYMBOL(_lantern__foreach_log_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_log__tensorlist)
 LOAD_SYMBOL(_lantern__foreach_log10_tensorlist)
@@ -12523,51 +12954,47 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern__foreach_log1p__tensorlist)
 LOAD_SYMBOL(_lantern__foreach_log2_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_log2__tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_max_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_neg_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_neg__tensorlist)
- LOAD_SYMBOL(_lantern__foreach_tan_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_tan__tensorlist)
- LOAD_SYMBOL(_lantern__foreach_tanh_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_tanh__tensorlist)
- LOAD_SYMBOL(_lantern__foreach_sin_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_sin__tensorlist)
- LOAD_SYMBOL(_lantern__foreach_sinh_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_sinh__tensorlist)
- LOAD_SYMBOL(_lantern__foreach_round_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_round__tensorlist)
- LOAD_SYMBOL(_lantern__foreach_lgamma_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_lgamma__tensorlist)
- LOAD_SYMBOL(_lantern__foreach_frac_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_frac__tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_norm_tensorlist_scalar_scalartype)
+ LOAD_SYMBOL(_lantern__foreach_pow_tensorlist_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_pow_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_pow_tensorlist_arrayrefscalar)
+ LOAD_SYMBOL(_lantern__foreach_pow_scalar_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_pow__tensorlist_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_pow__tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_pow__tensorlist_arrayrefscalar)
 LOAD_SYMBOL(_lantern__foreach_reciprocal_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_reciprocal__tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_round_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_round__tensorlist)
 LOAD_SYMBOL(_lantern__foreach_sigmoid_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_sigmoid__tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_sign_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_sign__tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_sin_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_sin__tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_sinh_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_sinh__tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_sqrt_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_sqrt__tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_tan_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_tan__tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_tanh_tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_tanh__tensorlist)
 LOAD_SYMBOL(_lantern__foreach_trunc_tensorlist)
 LOAD_SYMBOL(_lantern__foreach_trunc__tensorlist)
- LOAD_SYMBOL(_lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar)
- LOAD_SYMBOL(_lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor)
- LOAD_SYMBOL(_lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar)
- LOAD_SYMBOL(_lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor)
- LOAD_SYMBOL(_lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar)
- LOAD_SYMBOL(_lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor)
- LOAD_SYMBOL(_lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar)
- LOAD_SYMBOL(_lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor)
- LOAD_SYMBOL(_lantern__foreach_norm_tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_lerp_tensorlist_tensorlist_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_lerp__tensorlist_tensorlist_tensorlist)
- LOAD_SYMBOL(_lantern__foreach_lerp_tensorlist_tensorlist_scalar)
- LOAD_SYMBOL(_lantern__foreach_lerp__tensorlist_tensorlist_scalar)
+ LOAD_SYMBOL(_lantern__foreach_zero__tensorlist)
+ LOAD_SYMBOL(_lantern__foreach_copy__tensorlist_tensorlist_bool)
+ LOAD_SYMBOL(_lantern__foreach_copy_tensorlist_tensorlist_bool)
 LOAD_SYMBOL(_lantern_bucketize_tensor_tensor_bool_bool)
 LOAD_SYMBOL(_lantern_bucketize_out_tensor_tensor_tensor_bool_bool)
 LOAD_SYMBOL(_lantern_bucketize_scalar_tensor_bool_bool)
 LOAD_SYMBOL(_lantern_searchsorted_tensor_tensor_bool_bool_cstringview_tensor)
 LOAD_SYMBOL(_lantern_searchsorted_out_tensor_tensor_tensor_bool_bool_cstringview_tensor)
 LOAD_SYMBOL(_lantern_searchsorted_tensor_scalar_bool_bool_cstringview_tensor)
+ LOAD_SYMBOL(_lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor)
 LOAD_SYMBOL(_lantern__convert_indices_from_coo_to_csr_tensor_intt_bool)
 LOAD_SYMBOL(_lantern__convert_indices_from_coo_to_csr_out_tensor_tensor_intt_bool)
 LOAD_SYMBOL(_lantern__convert_indices_from_csr_to_coo_tensor_tensor_bool_bool)
@@ -12998,6 +13425,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_Tensor_logdet_tensor)
 LOAD_SYMBOL(_lantern_linalg_eig_tensor)
 LOAD_SYMBOL(_lantern_linalg_eig_out_tensor_tensor_tensor)
+ LOAD_SYMBOL(_lantern__linalg_eigvals_tensor)
 LOAD_SYMBOL(_lantern_linalg_eigvals_tensor)
 LOAD_SYMBOL(_lantern_linalg_eigvals_out_tensor_tensor)
 LOAD_SYMBOL(_lantern__linalg_eigh_tensor_cstringview_bool)
@@ -13057,6 +13485,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_linalg_solve_ex_tensor_tensor_bool_bool)
 LOAD_SYMBOL(_lantern_linalg_solve_ex_out_tensor_tensor_tensor_tensor_bool_bool)
 LOAD_SYMBOL(_lantern_linalg_solve_tensor_tensor_bool)
+ LOAD_SYMBOL(_lantern__spsolve_tensor_tensor_bool)
 LOAD_SYMBOL(_lantern_linalg_solve_out_tensor_tensor_tensor_bool)
 LOAD_SYMBOL(_lantern_linalg_tensorinv_tensor_intt)
 LOAD_SYMBOL(_lantern_linalg_tensorinv_out_tensor_tensor_intt)
@@ -13078,6 +13507,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_linalg_multi_dot_out_tensor_tensorlist)
 LOAD_SYMBOL(_lantern_nested_to_padded_tensor_tensor_double_intarrayref)
 LOAD_SYMBOL(_lantern__test_serialization_subcmul_tensor_tensor_scalar)
+ LOAD_SYMBOL(_lantern__test_parallel_materialize_tensor_intt_bool)
 LOAD_SYMBOL(_lantern__test_optional_intlist_tensor_intarrayref)
 LOAD_SYMBOL(_lantern__test_optional_filled_intlist_tensor_intarrayref)
 LOAD_SYMBOL(_lantern__test_optional_floatlist_tensor_arrayrefdouble)
@@ -13091,7 +13521,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern__test_autograd_multiple_dispatch_view_copy_tensor)
 LOAD_SYMBOL(_lantern_segment_reduce_tensor_cstringview_tensor_tensor_tensor_intt_bool_scalar)
 LOAD_SYMBOL(_lantern__segment_reduce_backward_tensor_tensor_tensor_cstringview_tensor_tensor_intt_scalar)
- LOAD_SYMBOL(_lantern_pad_sequence_tensorlist_bool_double)
+ LOAD_SYMBOL(_lantern_pad_sequence_tensorlist_bool_double_cstringview)
 LOAD_SYMBOL(_lantern_flatten_dense_tensors_tensorlist)
 LOAD_SYMBOL(_lantern_unflatten_dense_tensors_tensor_tensorlist)
 LOAD_SYMBOL(_lantern__nested_tensor_from_tensor_list_tensorlist_scalartype_layout_device_bool)
@@ -13135,28 +13565,35 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_unfold_copy_tensor_intt_intt_intt)
 LOAD_SYMBOL(_lantern_alias_copy_tensor)
 LOAD_SYMBOL(_lantern_Tensor_to_padded_tensor_tensor_double_intarrayref)
+ LOAD_SYMBOL(_lantern__jagged_to_padded_dense_forward_tensor_tensorlist_intarrayref_double)
+ LOAD_SYMBOL(_lantern__padded_dense_to_jagged_forward_tensor_tensorlist_intt)
 LOAD_SYMBOL(_lantern__nested_tensor_softmax_with_shape_tensor_tensor)
+ LOAD_SYMBOL(_lantern__safe_softmax_tensor_intt_scalartype)
 LOAD_SYMBOL(_lantern__transformer_encoder_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt)
 LOAD_SYMBOL(_lantern__native_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_bool_bool_intt)
- LOAD_SYMBOL(_lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool)
- LOAD_SYMBOL(_lantern__scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_bool)
- LOAD_SYMBOL(_lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool)
- LOAD_SYMBOL(_lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor)
- LOAD_SYMBOL(_lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool)
- LOAD_SYMBOL(_lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt)
- LOAD_SYMBOL(_lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_bool_bool)
- LOAD_SYMBOL(_lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool)
- LOAD_SYMBOL(_lantern__chunk_grad_outputs_efficient_attention_tensor_tensor_tensor_bool)
- LOAD_SYMBOL(_lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool)
- LOAD_SYMBOL(_lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt)
- LOAD_SYMBOL(_lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_bool_bool)
- LOAD_SYMBOL(_lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool)
+ LOAD_SYMBOL(_lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_double_bool)
+ LOAD_SYMBOL(_lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool_double_bool)
+ LOAD_SYMBOL(_lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor_double_bool)
+ LOAD_SYMBOL(_lantern__scaled_dot_product_attention_math_for_mps_tensor_tensor_tensor_tensor_double_bool_tensor_double)
+ LOAD_SYMBOL(_lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool_double)
+ LOAD_SYMBOL(_lantern__scaled_dot_product_flash_attention_for_cpu_tensor_tensor_tensor_double_bool_tensor_double)
+ LOAD_SYMBOL(_lantern__scaled_dot_product_fused_attention_overrideable_tensor_tensor_tensor_tensor_double_bool_bool_double)
+ LOAD_SYMBOL(_lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double)
+ LOAD_SYMBOL(_lantern__scaled_dot_product_flash_attention_for_cpu_backward_tensor_tensor_tensor_tensor_tensor_tensor_double_bool_tensor_double)
+ LOAD_SYMBOL(_lantern__scaled_dot_product_fused_attention_overrideable_backward_tensor_tensor_tensor_tensor_tensor_stdarraybool_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double)
+ LOAD_SYMBOL(_lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_tensor_bool_double_bool_double)
+ LOAD_SYMBOL(_lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_stdarraybool_bool_double)
+ LOAD_SYMBOL(_lantern__scaled_dot_product_cudnn_attention_tensor_tensor_tensor_tensor_bool_double_bool_bool_double)
+ LOAD_SYMBOL(_lantern__scaled_dot_product_cudnn_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_double)
+ LOAD_SYMBOL(_lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool_double_intt_intt_tensor_tensor)
+ LOAD_SYMBOL(_lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double_intt_intt)
+ LOAD_SYMBOL(_lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_intt_bool_double_tensor_intt)
+ LOAD_SYMBOL(_lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_double_tensor_tensor_intt_bool_double_intt_intt_bool)
 LOAD_SYMBOL(_lantern__triton_scaled_dot_attention_tensor_tensor_tensor_double)
+ LOAD_SYMBOL(_lantern__fill_mem_eff_dropout_mask__tensor_double_intt_intt)
 LOAD_SYMBOL(_lantern__triton_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor)
 LOAD_SYMBOL(_lantern_special_airy_ai_tensor)
 LOAD_SYMBOL(_lantern_special_airy_ai_out_tensor_tensor)
- LOAD_SYMBOL(_lantern__transformer_decoder_only_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor)
- LOAD_SYMBOL(_lantern__native_decoder_only_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool)
 LOAD_SYMBOL(_lantern_special_bessel_j0_tensor)
 LOAD_SYMBOL(_lantern_special_bessel_j0_out_tensor_tensor)
 LOAD_SYMBOL(_lantern_special_bessel_j1_tensor)
@@ -13253,7 +13690,13 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_special_spherical_bessel_j0_out_tensor_tensor)
 LOAD_SYMBOL(_lantern__foobar_tensor_bool_bool_bool)
 LOAD_SYMBOL(_lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor)
+ LOAD_SYMBOL(_lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor)
 LOAD_SYMBOL(_lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor)
+ LOAD_SYMBOL(_lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor)
+ LOAD_SYMBOL(_lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor)
+ LOAD_SYMBOL(_lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor)
+ LOAD_SYMBOL(_lantern__fused_adagrad__tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor)
+ LOAD_SYMBOL(_lantern__propagate_xla_data_tensor_tensor)
 LOAD_SYMBOL(_lantern__new_zeros_with_same_feature_meta_out_tensor_tensor_tensor_intt)
 LOAD_SYMBOL(_lantern__cudnn_ctc_loss_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intt_bool_bool)
 LOAD_SYMBOL(_lantern__cudnn_rnn_flatten_weight_out_tensor_tensorlist_intt_intt_intt_intt_intt_intt_bool_bool)
@@ -13268,6 +13711,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern__add_relu_out_tensor_tensor_scalar_scalar)
 LOAD_SYMBOL(_lantern_add_out_tensor_tensor_scalar_scalar)
 LOAD_SYMBOL(_lantern_affine_grid_generator_out_tensor_tensor_intarrayref_bool)
+ LOAD_SYMBOL(_lantern__test_functorch_fallback_out_tensor_tensor_tensor)
 LOAD_SYMBOL(_lantern_bartlett_window_out_tensor_intt)
 LOAD_SYMBOL(_lantern_bartlett_window_out_tensor_intt_bool)
 LOAD_SYMBOL(_lantern_quantized_batch_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_double_double_intt)
@@ -13295,7 +13739,6 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_cudnn_affine_grid_generator_backward_out_tensor_tensor_intt_intt_intt_intt)
 LOAD_SYMBOL(_lantern_cudnn_batch_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double)
 LOAD_SYMBOL(_lantern_cudnn_batch_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_tensor)
- LOAD_SYMBOL(_lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)
 LOAD_SYMBOL(_lantern_cudnn_convolution_transpose_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)
 LOAD_SYMBOL(_lantern__mps_convolution_transpose_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt)
 LOAD_SYMBOL(_lantern_mps_convolution_transpose_backward_out_tensor_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_stdarraybool)
@@ -13319,6 +13762,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern__embedding_bag_dense_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_intt_bool_intt_tensor_intt)
 LOAD_SYMBOL(_lantern__embedding_bag_per_sample_weights_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt)
 LOAD_SYMBOL(_lantern_empty_out_tensor_intarrayref_dimnamelist_memoryformat)
+ LOAD_SYMBOL(_lantern_empty_permuted_out_tensor_intarrayref_intarrayref)
 LOAD_SYMBOL(_lantern_new_empty_out_tensor_tensor_intarrayref)
 LOAD_SYMBOL(_lantern_new_empty_strided_out_tensor_tensor_intarrayref_intarrayref)
 LOAD_SYMBOL(_lantern_new_full_out_tensor_tensor_intarrayref_scalar)
@@ -13335,6 +13779,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_empty_strided_out_tensor_intarrayref_intarrayref)
 LOAD_SYMBOL(_lantern_fill_out_tensor_tensor_scalar)
 LOAD_SYMBOL(_lantern_fill_out_tensor_tensor_tensor)
+ LOAD_SYMBOL(_lantern_floor_divide_out_tensor_tensor_scalar)
 LOAD_SYMBOL(_lantern_full_out_tensor_intarrayref_scalar_dimnamelist)
 LOAD_SYMBOL(_lantern_full_like_out_tensor_tensor_scalar_memoryformat)
 LOAD_SYMBOL(_lantern_from_file_out_tensor_cstringview_bool_intt)
@@ -13354,9 +13799,9 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_kaiser_window_out_tensor_intt_bool_double)
 LOAD_SYMBOL(_lantern_native_group_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_intt_intt_double)
 LOAD_SYMBOL(_lantern_native_group_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_intt_intt_stdarraybool)
- LOAD_SYMBOL(_lantern_index_put_out_tensor_tensor_constclistcoptionaltensor_tensor_bool)
- LOAD_SYMBOL(_lantern__index_put_impl_out_tensor_tensor_constclistcoptionaltensor_tensor_bool_bool)
- LOAD_SYMBOL(_lantern__index_put_impl_tensor_constclistcoptionaltensor_tensor_bool_bool)
+ LOAD_SYMBOL(_lantern_index_put_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool)
+ LOAD_SYMBOL(_lantern__index_put_impl_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool_bool)
+ LOAD_SYMBOL(_lantern__index_put_impl_tensor_constcliststdoptionaltensor_tensor_bool_bool)
 LOAD_SYMBOL(_lantern_isnan_out_tensor_tensor)
 LOAD_SYMBOL(_lantern_native_layer_norm_out_tensor_tensor_tensor_tensor_intarrayref_tensor_tensor_double)
 LOAD_SYMBOL(_lantern_native_layer_norm_backward_out_tensor_tensor_tensor_tensor_tensor_intarrayref_tensor_tensor_tensor_tensor_stdarraybool)
@@ -13375,6 +13820,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_mkldnn_max_pool3d_backward_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)
 LOAD_SYMBOL(_lantern_quantized_max_pool1d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)
 LOAD_SYMBOL(_lantern_quantized_max_pool2d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)
+ LOAD_SYMBOL(_lantern_quantized_max_pool3d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)
 LOAD_SYMBOL(_lantern_median_out_tensor_tensor)
 LOAD_SYMBOL(_lantern_nanmedian_out_tensor_tensor)
 LOAD_SYMBOL(_lantern__mps_convolution_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt)
@@ -13392,6 +13838,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern__sparse_sparse_matmul_out_tensor_tensor_tensor)
 LOAD_SYMBOL(_lantern_mul_out_tensor_tensor_scalar)
 LOAD_SYMBOL(_lantern__native_batch_norm_legit_functional_tensor_tensor_tensor_tensor_tensor_bool_double_double)
+ LOAD_SYMBOL(_lantern__native_batch_norm_legit_no_training_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double)
 LOAD_SYMBOL(_lantern_batch_norm_stats_out_tensor_tensor_tensor_double)
 LOAD_SYMBOL(_lantern_batch_norm_gather_stats_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double_intt)
 LOAD_SYMBOL(_lantern_batch_norm_gather_stats_with_counts_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double_tensor)
@@ -13434,7 +13881,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_unsafe_split_out_tensorlist_tensor_intt_intt)
 LOAD_SYMBOL(_lantern_unsafe_split_with_sizes_out_tensorlist_tensor_intarrayref_intt)
 LOAD_SYMBOL(_lantern_sum_out_tensor_tensor_scalartype)
- LOAD_SYMBOL(_lantern_std_mean_out_tensor_tensor_tensor_intarrayref_intt_bool)
+ LOAD_SYMBOL(_lantern_std_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool)
 LOAD_SYMBOL(_lantern_prod_out_tensor_tensor_scalartype)
 LOAD_SYMBOL(_lantern__mkldnn_transpose_out_tensor_tensor_intt_intt)
 LOAD_SYMBOL(_lantern_flip_out_tensor_tensor_intarrayref)
@@ -13445,8 +13892,11 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern__nested_from_padded_out_tensor_tensor_tensor_bool)
 LOAD_SYMBOL(_lantern__nested_tensor_size_out_tensor_tensor)
 LOAD_SYMBOL(_lantern__nested_tensor_strides_out_tensor_tensor)
+ LOAD_SYMBOL(_lantern__nested_tensor_storage_offsets_out_tensor_tensor)
 LOAD_SYMBOL(_lantern__nested_from_padded_and_nested_example_out_tensor_tensor_tensor)
- LOAD_SYMBOL(_lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_intarrayref)
+ LOAD_SYMBOL(_lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_tensor)
+ LOAD_SYMBOL(_lantern__nested_view_from_jagged_copy_out_tensor_tensor_tensor_tensor_tensor_intt_tensor_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_values_copy_out_tensor_tensor)
 LOAD_SYMBOL(_lantern__trilinear_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt)
 LOAD_SYMBOL(_lantern__unique_out_tensor_tensor_tensor_bool_bool)
 LOAD_SYMBOL(_lantern_unique_dim_out_tensor_tensor_tensor_tensor_intt_bool_bool_bool)
@@ -13454,7 +13904,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
 LOAD_SYMBOL(_lantern_unique_dim_consecutive_out_tensor_tensor_tensor_tensor_intt_bool_bool)
 LOAD_SYMBOL(_lantern__unique2_out_tensor_tensor_tensor_tensor_bool_bool_bool)
 LOAD_SYMBOL(_lantern__unsafe_view_out_tensor_tensor_intarrayref)
- LOAD_SYMBOL(_lantern_var_mean_out_tensor_tensor_tensor_intarrayref_intt_bool)
LOAD_SYMBOL(_lantern_var_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool) LOAD_SYMBOL(_lantern__weight_norm_interface_out_tensor_tensor_tensor_tensor_intt) LOAD_SYMBOL(_lantern__weight_norm_interface_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_intt) LOAD_SYMBOL(_lantern_zeros_out_tensor_intarrayref_dimnamelist) @@ -13468,6 +13918,8 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_binomial_out_tensor_tensor_tensor_generator) LOAD_SYMBOL(_lantern_native_norm_out_tensor_tensor_scalar) LOAD_SYMBOL(_lantern_native_norm_out_tensor_tensor_scalar_intarrayref_bool_scalartype) + LOAD_SYMBOL(_lantern__batch_norm_with_update_functional_tensor_tensor_tensor_tensor_tensor_double_double) + LOAD_SYMBOL(_lantern__batch_norm_no_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double) LOAD_SYMBOL(_lantern__sparse_sum_out_tensor_tensor_intarrayref) LOAD_SYMBOL(_lantern__sparse_sum_backward_out_tensor_tensor_tensor_intarrayref) LOAD_SYMBOL(_lantern__sparse_csr_sum_out_tensor_tensor_intarrayref_bool_scalartype) @@ -13492,27 +13944,28 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__sparse_addmm_out_tensor_tensor_tensor_tensor_scalar_scalar) LOAD_SYMBOL(_lantern_sparse_coo_tensor_out_tensor_intarrayref) LOAD_SYMBOL(_lantern__sparse_coo_tensor_with_dims_out_tensor_intt_intt_intarrayref) - LOAD_SYMBOL(_lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor) + LOAD_SYMBOL(_lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor_bool) LOAD_SYMBOL(_lantern_sparse_resize_out_tensor_tensor_intarrayref_intt_intt) LOAD_SYMBOL(_lantern_sparse_resize_tensor_intarrayref_intt_intt) LOAD_SYMBOL(_lantern_sparse_resize_and_clear_out_tensor_tensor_intarrayref_intt_intt) LOAD_SYMBOL(_lantern_sparse_resize_and_clear_tensor_intarrayref_intt_intt) LOAD_SYMBOL(_lantern_sparse_mask_out_tensor_tensor_tensor) - LOAD_SYMBOL(_lantern__to_dense_out_tensor_tensor_scalartype) + LOAD_SYMBOL(_lantern__sparse_mask_projection_out_tensor_tensor_tensor_bool) + LOAD_SYMBOL(_lantern__to_dense_out_tensor_tensor_scalartype_bool) LOAD_SYMBOL(_lantern__coalesce_out_tensor_tensor) LOAD_SYMBOL(_lantern__coalesced_out_tensor_tensor_bool) LOAD_SYMBOL(_lantern__coalesced_tensor_bool) LOAD_SYMBOL(_lantern_copy_sparse_to_sparse_out_tensor_tensor_tensor_bool) LOAD_SYMBOL(_lantern_copy_sparse_to_sparse_tensor_tensor_bool) - LOAD_SYMBOL(_lantern_to_sparse_out_tensor_tensor_intt) - LOAD_SYMBOL(_lantern_to_sparse_out_tensor_tensor_layout_intarrayref_intt) - LOAD_SYMBOL(_lantern_to_sparse_csr_out_tensor_tensor_intt) - LOAD_SYMBOL(_lantern_to_sparse_csc_out_tensor_tensor_intt) - LOAD_SYMBOL(_lantern_to_sparse_bsr_out_tensor_tensor_intarrayref_intt) - LOAD_SYMBOL(_lantern_to_sparse_bsc_out_tensor_tensor_intarrayref_intt) + LOAD_SYMBOL(_lantern__to_sparse_out_tensor_tensor_intt) + LOAD_SYMBOL(_lantern__to_sparse_out_tensor_tensor_layout_intarrayref_intt) + LOAD_SYMBOL(_lantern__to_sparse_csr_out_tensor_tensor_intt) + LOAD_SYMBOL(_lantern__to_sparse_csc_out_tensor_tensor_intt) + LOAD_SYMBOL(_lantern__to_sparse_bsr_out_tensor_tensor_intarrayref_intt) + LOAD_SYMBOL(_lantern__to_sparse_bsc_out_tensor_tensor_intarrayref_intt) LOAD_SYMBOL(_lantern_to_mkldnn_out_tensor_tensor_scalartype) LOAD_SYMBOL(_lantern_mkldnn_reorder_conv2d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref) - 
LOAD_SYMBOL(_lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt) + LOAD_SYMBOL(_lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref) LOAD_SYMBOL(_lantern_quantize_per_tensor_dynamic_out_tensor_tensor_scalartype_bool) LOAD_SYMBOL(_lantern_quantize_per_tensor_out_tensor_tensor_double_intt_scalartype) LOAD_SYMBOL(_lantern_quantize_per_tensor_out_tensor_tensor_tensor_tensor_scalartype) @@ -13592,7 +14045,6 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__histogramdd_from_bin_cts_out_tensor_tensor_intarrayref_arrayrefdouble_tensor_bool) LOAD_SYMBOL(_lantern__histogramdd_from_bin_tensors_out_tensor_tensor_tensorlist_tensor_bool) LOAD_SYMBOL(_lantern_remainder_out_tensor_scalar_tensor) - LOAD_SYMBOL(_lantern_argsort_out_tensor_tensor_bool_intt_bool) LOAD_SYMBOL(_lantern_unfold_backward_out_tensor_tensor_intarrayref_intt_intt_intt) LOAD_SYMBOL(_lantern_normal_out_tensor_tensor_double_double_generator) LOAD_SYMBOL(_lantern__amp_foreach_non_finite_check_and_unscale_out_tensorlist_tensorlist_tensor_tensor) @@ -13600,33 +14052,38 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__amp_update_scale_out_tensor_tensor_tensor_tensor_double_double_intt) LOAD_SYMBOL(_lantern__amp_update_scale_tensor_tensor_tensor_double_double_intt) LOAD_SYMBOL(_lantern__foreach_add_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_sub_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_mul_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_div_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_maximum_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_minimum_out_tensorlist_tensorlist_scalar) LOAD_SYMBOL(_lantern__foreach_add_out_tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_add_out_tensorlist_tensorlist_tensor_scalar) + LOAD_SYMBOL(_lantern__foreach_sub_out_tensorlist_tensorlist_scalar) LOAD_SYMBOL(_lantern__foreach_sub_out_tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_mul_out_tensorlist_tensorlist_scalar) LOAD_SYMBOL(_lantern__foreach_mul_out_tensorlist_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_mul_out_tensorlist_tensorlist_tensor) + LOAD_SYMBOL(_lantern__foreach_div_out_tensorlist_tensorlist_scalar) LOAD_SYMBOL(_lantern__foreach_div_out_tensorlist_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar) LOAD_SYMBOL(_lantern__foreach_div_out_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar) + 
LOAD_SYMBOL(_lantern__foreach_div_out_tensorlist_tensorlist_tensor) + LOAD_SYMBOL(_lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_clamp_max_out_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_maximum_out_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_maximum_out_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_minimum_out_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_minimum_out_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_exp_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_zero_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_zero_tensorlist) - LOAD_SYMBOL(_lantern__foreach_sqrt_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor) + LOAD_SYMBOL(_lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor) LOAD_SYMBOL(_lantern__foreach_abs_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_acos_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_asin_out_tensorlist_tensorlist) @@ -13636,34 +14093,37 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__foreach_cosh_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_erf_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_erfc_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_exp_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_expm1_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_floor_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_frac_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_lgamma_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_log_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_log10_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_log1p_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_log2_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_max_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_neg_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_tan_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_tanh_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_sin_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_sinh_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_round_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_lgamma_out_tensorlist_tensorlist) - 
LOAD_SYMBOL(_lantern__foreach_frac_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_norm_out_tensorlist_tensorlist_scalar_scalartype) + LOAD_SYMBOL(_lantern__foreach_pow_out_tensorlist_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_pow_out_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_pow_out_tensorlist_tensorlist_arrayrefscalar) LOAD_SYMBOL(_lantern__foreach_reciprocal_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_round_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_sigmoid_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_sign_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_sin_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_sinh_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_sqrt_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_tan_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_tanh_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_trunc_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor) - LOAD_SYMBOL(_lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor) - LOAD_SYMBOL(_lantern__foreach_norm_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_zero_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_zero_tensorlist) + LOAD_SYMBOL(_lantern__foreach_copy_out_tensorlist_tensorlist_tensorlist_bool) LOAD_SYMBOL(_lantern_bucketize_out_tensor_scalar_tensor_bool_bool) - LOAD_SYMBOL(_lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor) LOAD_SYMBOL(_lantern_glu_jvp_out_tensor_tensor_tensor_tensor_intt) LOAD_SYMBOL(_lantern_glu_backward_jvp_out_tensor_tensor_tensor_tensor_tensor_tensor_intt) LOAD_SYMBOL(_lantern_hardswish_backward_out_tensor_tensor_tensor) @@ -13726,13 +14186,21 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__native_multi_head_attention_out_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_bool_bool_intt) LOAD_SYMBOL(_lantern__triton_scaled_dot_attention_out_tensor_tensor_tensor_tensor_double) LOAD_SYMBOL(_lantern__triton_multi_head_attention_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor) - LOAD_SYMBOL(_lantern__transformer_decoder_only_layer_fwd_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor) - LOAD_SYMBOL(_lantern__native_decoder_only_multi_head_attention_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool) LOAD_SYMBOL(_lantern__foobar_out_tensor_tensor_bool_bool_bool) LOAD_SYMBOL(_lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor) 
LOAD_SYMBOL(_lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor) LOAD_SYMBOL(_lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor) LOAD_SYMBOL(_lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adagrad_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adagrad_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor) /* Autogen Symbols -- End */ return true; diff --git a/inst/include/lantern/types.h b/inst/include/lantern/types.h index 111aabfff8..9f606c9b95 100644 --- a/inst/include/lantern/types.h +++ b/inst/include/lantern/types.h @@ -1,3 +1,4 @@ +#include #include // https://pt.stackoverflow.com/a/438284/6036 @@ -156,6 +157,7 @@ LANTERN_FROM_RAW_DECL(SymInt, c10::SymInt) LANTERN_FROM_RAW_DECL(SymIntArrayRef, c10::SymIntArrayRef) LANTERN_FROM_RAW_DECL(FunctionSchema, c10::FunctionSchema) LANTERN_FROM_RAW_DECL(Argument, c10::Argument) +LANTERN_FROM_RAW_DECL(DeviceIndex, at::DeviceIndex) namespace optional { LANTERN_FROM_RAW_DECL(DimnameList, c10::optional) @@ -589,6 +591,7 @@ LANTERN_FROM_RAW(SymInt, c10::SymInt) LANTERN_FROM_RAW_WRAPPED(SymIntArrayRef, self_contained::SymIntArrayRef, c10::SymIntArrayRef) LANTERN_FROM_RAW(FunctionSchema, c10::FunctionSchema) LANTERN_FROM_RAW(Argument, c10::Argument) +LANTERN_FROM_RAW(DeviceIndex, at::DeviceIndex) namespace optional { LANTERN_FROM_RAW_WRAPPED(DimnameList, self_contained::optional::DimnameList, diff --git a/inst/include/utils.h b/inst/include/utils.h index 116d11ed2e..59bc27f956 100644 --- a/inst/include/utils.h +++ b/inst/include/utils.h @@ -50,8 +50,13 @@ class EventLoop { { std::unique_lock lock(mtx_); if (tasks_.empty()) { - cv_.wait(lock, [this] { return !tasks_.empty(); }); + cv_.wait(lock, [this] { return stop_requested_ || !tasks_.empty(); }); } + + if (stop_requested_ && tasks_.empty()) { + return; + } + fn = std::move(tasks_.front()); tasks_.pop_front(); } @@ -76,10 +81,15 @@ class EventLoop { } 
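The block above is the autogenerated portion of lantern.cpp that resolves every `_lantern_*` entry point from the lantern shared library at load time. Each suffix encodes the C-level argument types of the wrapped ATen operator, which is why an upstream signature change (e.g. `constclistcoptionaltensor` becoming `constcliststdoptionaltensor`, apparently tracking LibTorch's move from `c10::optional` to `std::optional`) shows up here as a rename of the whole symbol. As a rough sketch of how a LOAD_SYMBOL-style macro can be built over dlsym(); the handle name `pLib`, the example symbol slot, and the error handling are illustrative assumptions, not lantern's actual internals:

// Illustrative sketch only -- not lantern's actual macro.
#include <dlfcn.h>
#include <cstdio>

extern void* pLib;                             // hypothetical dlopen() handle
extern void* (*_lantern_Tensor_clone)(void*);  // hypothetical symbol slot, declared elsewhere

#define LOAD_SYMBOL(name)                                         \
  name = reinterpret_cast<decltype(name)>(dlsym(pLib, #name));    \
  if ((name) == nullptr) {                                        \
    std::fprintf(stderr, "lantern: missing symbol %s\n", #name);  \
    return false;                                                 \
  }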
diff --git a/inst/include/lantern/types.h b/inst/include/lantern/types.h
index 111aabfff8..9f606c9b95 100644
--- a/inst/include/lantern/types.h
+++ b/inst/include/lantern/types.h
@@ -1,3 +1,4 @@
+#include
 #include
 
 // https://pt.stackoverflow.com/a/438284/6036
@@ -156,6 +157,7 @@ LANTERN_FROM_RAW_DECL(SymInt, c10::SymInt)
 LANTERN_FROM_RAW_DECL(SymIntArrayRef, c10::SymIntArrayRef)
 LANTERN_FROM_RAW_DECL(FunctionSchema, c10::FunctionSchema)
 LANTERN_FROM_RAW_DECL(Argument, c10::Argument)
+LANTERN_FROM_RAW_DECL(DeviceIndex, at::DeviceIndex)
 
 namespace optional {
 LANTERN_FROM_RAW_DECL(DimnameList, c10::optional)
@@ -589,6 +591,7 @@ LANTERN_FROM_RAW(SymInt, c10::SymInt)
 LANTERN_FROM_RAW_WRAPPED(SymIntArrayRef, self_contained::SymIntArrayRef, c10::SymIntArrayRef)
 LANTERN_FROM_RAW(FunctionSchema, c10::FunctionSchema)
 LANTERN_FROM_RAW(Argument, c10::Argument)
+LANTERN_FROM_RAW(DeviceIndex, at::DeviceIndex)
 
 namespace optional {
 LANTERN_FROM_RAW_WRAPPED(DimnameList, self_contained::optional::DimnameList,
diff --git a/inst/include/utils.h b/inst/include/utils.h
index 116d11ed2e..59bc27f956 100644
--- a/inst/include/utils.h
+++ b/inst/include/utils.h
@@ -50,8 +50,13 @@ class EventLoop {
     {
       std::unique_lock<std::mutex> lock(mtx_);
       if (tasks_.empty()) {
-        cv_.wait(lock, [this] { return !tasks_.empty(); });
+        cv_.wait(lock, [this] { return stop_requested_ || !tasks_.empty(); });
       }
+
+      if (stop_requested_ && tasks_.empty()) {
+        return;
+      }
+
       fn = std::move(tasks_.front());
       tasks_.pop_front();
     }
@@ -76,10 +81,15 @@ class EventLoop {
     }
     cv_.notify_one();
   }
+  void stop() {
+    stop_requested_ = true;
+    cv_.notify_all();
+  }
 
  private:
   std::mutex mtx_;
   std::condition_variable cv_;
+  std::atomic<bool> stop_requested_{false};
   std::deque<std::packaged_task<void()>> tasks_;
 };
 
@@ -88,6 +98,7 @@ class ThreadPool {
  public:
   EventLoop event_loop;
   std::vector<std::thread> threads;
+
   ThreadPool (int n_threads = 5) {
     for(int i = 0; i < n_threads; i++) {
       threads.push_back(std::thread([this] () {
@@ -98,6 +109,16 @@ class ThreadPool {
   void push (std::packaged_task<void()>&& task) {
     this->event_loop.schedule(std::move(task));
   }
+  void stop() {
+    event_loop.stop();
+    for(auto& thread : threads) {
+      thread.join();
+    }
+    threads.clear();
+  }
+  ~ThreadPool() {
+    stop();
+  }
 };
 
 template
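The utils.h change gives the EventLoop a stop flag and the ThreadPool an explicit stop()/destructor pair: stop() sets `stop_requested_` and calls `cv_.notify_all()`, so workers parked in `cv_.wait()` wake up, drain any remaining tasks, and return, letting the `thread.join()` loop complete instead of deadlocking. (The template arguments `<std::mutex>`, `<bool>`, `<std::thread>`, and `<void()>` in the hunks above were garbled in transit and are restored here by inference from the surrounding calls.) A small usage sketch, assuming the classes exactly as in the diff; `example` is an illustrative name:

#include <future>

void example() {
  ThreadPool pool(2);                            // spawns 2 workers running the EventLoop
  std::packaged_task<void()> task([] { /* work */ });
  std::future<void> done = task.get_future();
  pool.push(std::move(task));                    // schedule() + cv_.notify_one()
  done.wait();                                   // block until a worker ran the task
}  // ~ThreadPool() -> stop(): sets stop_requested_, notify_all()s, join()s workers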
diff --git a/man/torch_can_cast.Rd b/man/torch_can_cast.Rd
index deb6e08a61..b907a4b5fe 100644
--- a/man/torch_can_cast.Rd
+++ b/man/torch_can_cast.Rd
@@ -5,10 +5,10 @@
 \alias{torch_can_cast}
 \title{Can_cast}
 \usage{
-torch_can_cast(from, to)
+torch_can_cast(from_, to)
 }
 \arguments{
-\item{from}{(dtype) The original \code{torch_dtype}.}
+\item{from_}{(dtype) The original \code{torch_dtype}.}
 
 \item{to}{(dtype) The target \code{torch_dtype}.}
 }
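The remaining hunks all touch src/RcppExports.cpp, which is regenerated by `Rcpp::compileAttributes()` rather than edited by hand: every exported C++ function gets a SEXP-in/SEXP-out shim with the same fixed shape, so upstream API changes (new overloads such as `all(dim = IntArrayRef)`, renamed types such as `constc10ListstdoptionalTensor`, dropped `_cufft_*` helpers) appear as mechanical insertions, renames, and deletions of whole shim blocks. For reference, the generic shape of one such shim, using a hypothetical `my_fun` rather than a real torch entry:

#include <Rcpp.h>

double my_fun(double x);  // hypothetical exported C++ function

RcppExport SEXP _torch_my_fun(SEXP xSEXP) {
BEGIN_RCPP
    Rcpp::RObject rcpp_result_gen;                            // protected result slot
    Rcpp::traits::input_parameter< double >::type x(xSEXP);   // SEXP -> C++ conversion
    rcpp_result_gen = Rcpp::wrap(my_fun(x));                  // C++ -> SEXP conversion
    return rcpp_result_gen;
END_RCPP                                                      // turns C++ exceptions into R errors
}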
diff --git a/src/RcppExports.cpp b/src/RcppExports.cpp
index 5b73f11e32..7c2923d89a 100644
--- a/src/RcppExports.cpp
+++ b/src/RcppExports.cpp
@@ -1522,6 +1522,18 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_method_all_self_Tensor_dim_IntArrayRef
+XPtrTorchTensor cpp_torch_method_all_self_Tensor_dim_IntArrayRef(XPtrTorchTensor self, XPtrTorchOptionalIndexIntArrayRef dim, XPtrTorchbool keepdim);
+RcppExport SEXP _torch_cpp_torch_method_all_self_Tensor_dim_IntArrayRef(SEXP selfSEXP, SEXP dimSEXP, SEXP keepdimSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalIndexIntArrayRef >::type dim(dimSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type keepdim(keepdimSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method_all_self_Tensor_dim_IntArrayRef(self, dim, keepdim));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_method_all_self_Tensor_dim_Dimname
 XPtrTorchTensor cpp_torch_method_all_self_Tensor_dim_Dimname(XPtrTorchTensor self, XPtrTorchDimname dim, XPtrTorchbool keepdim);
 RcppExport SEXP _torch_cpp_torch_method_all_self_Tensor_dim_Dimname(SEXP selfSEXP, SEXP dimSEXP, SEXP keepdimSEXP) {
@@ -1560,6 +1572,18 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_method_any_self_Tensor_dim_IntArrayRef
+XPtrTorchTensor cpp_torch_method_any_self_Tensor_dim_IntArrayRef(XPtrTorchTensor self, XPtrTorchOptionalIndexIntArrayRef dim, XPtrTorchbool keepdim);
+RcppExport SEXP _torch_cpp_torch_method_any_self_Tensor_dim_IntArrayRef(SEXP selfSEXP, SEXP dimSEXP, SEXP keepdimSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalIndexIntArrayRef >::type dim(dimSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type keepdim(keepdimSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method_any_self_Tensor_dim_IntArrayRef(self, dim, keepdim));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_method_any_self_Tensor_dim_Dimname
 XPtrTorchTensor cpp_torch_method_any_self_Tensor_dim_Dimname(XPtrTorchTensor self, XPtrTorchDimname dim, XPtrTorchbool keepdim);
 RcppExport SEXP _torch_cpp_torch_method_any_self_Tensor_dim_Dimname(SEXP selfSEXP, SEXP dimSEXP, SEXP keepdimSEXP) {
@@ -1973,6 +1997,16 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_method__lazy_clone_self_Tensor
+XPtrTorchTensor cpp_torch_method__lazy_clone_self_Tensor(XPtrTorchTensor self);
+RcppExport SEXP _torch_cpp_torch_method__lazy_clone_self_Tensor(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method__lazy_clone_self_Tensor(self));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_method_logical_not_self_Tensor
 XPtrTorchTensor cpp_torch_method_logical_not_self_Tensor(XPtrTorchTensor self);
 RcppExport SEXP _torch_cpp_torch_method_logical_not_self_Tensor(SEXP selfSEXP) {
@@ -3342,14 +3376,14 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_method_index_self_Tensor_indices_constc10Listc10optionalTensor
-XPtrTorchTensor cpp_torch_method_index_self_Tensor_indices_constc10Listc10optionalTensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices);
-RcppExport SEXP _torch_cpp_torch_method_index_self_Tensor_indices_constc10Listc10optionalTensor(SEXP selfSEXP, SEXP indicesSEXP) {
+// cpp_torch_method_index_self_Tensor_indices_constc10ListstdoptionalTensor
+XPtrTorchTensor cpp_torch_method_index_self_Tensor_indices_constc10ListstdoptionalTensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices);
+RcppExport SEXP _torch_cpp_torch_method_index_self_Tensor_indices_constc10ListstdoptionalTensor(SEXP selfSEXP, SEXP indicesSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
     Rcpp::traits::input_parameter< XPtrTorchOptionalIndexTensorList >::type indices(indicesSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_method_index_self_Tensor_indices_constc10Listc10optionalTensor(self, indices));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method_index_self_Tensor_indices_constc10ListstdoptionalTensor(self, indices));
     return rcpp_result_gen;
 END_RCPP
 }
@@ -3405,29 +3439,29 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_method_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor
-XPtrTorchTensor cpp_torch_method_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate);
-RcppExport SEXP _torch_cpp_torch_method_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP) {
+// cpp_torch_method_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor
+XPtrTorchTensor cpp_torch_method_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate);
+RcppExport SEXP _torch_cpp_torch_method_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
     Rcpp::traits::input_parameter< XPtrTorchOptionalIndexTensorList >::type indices(indicesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchbool >::type accumulate(accumulateSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_method_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(self, indices, values, accumulate));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(self, indices, values, accumulate));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_method_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor
-XPtrTorchTensor cpp_torch_method_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate);
-RcppExport SEXP _torch_cpp_torch_method_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP) {
+// cpp_torch_method_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor
+XPtrTorchTensor cpp_torch_method_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate);
+RcppExport SEXP _torch_cpp_torch_method_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
     Rcpp::traits::input_parameter< XPtrTorchOptionalIndexTensorList >::type indices(indicesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchbool >::type accumulate(accumulateSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_method_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(self, indices, values, accumulate));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(self, indices, values, accumulate));
     return rcpp_result_gen;
 END_RCPP
 }
@@ -4906,6 +4940,21 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_method_slice_inverse_self_Tensor_src_Tensor
+XPtrTorchTensor cpp_torch_method_slice_inverse_self_Tensor_src_Tensor(XPtrTorchTensor self, XPtrTorchTensor src, XPtrTorchindex_int64_t dim, XPtrTorchoptional_int64_t start, XPtrTorchoptional_int64_t end, XPtrTorchint64_t step);
+RcppExport SEXP _torch_cpp_torch_method_slice_inverse_self_Tensor_src_Tensor(SEXP selfSEXP, SEXP srcSEXP, SEXP dimSEXP, SEXP startSEXP, SEXP endSEXP, SEXP stepSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type src(srcSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchindex_int64_t >::type dim(dimSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type start(startSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type end(endSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type step(stepSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method_slice_inverse_self_Tensor_src_Tensor(self, src, dim, start, end, step));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_method_slice_scatter_self_Tensor_src_Tensor
 XPtrTorchTensor cpp_torch_method_slice_scatter_self_Tensor_src_Tensor(XPtrTorchTensor self, XPtrTorchTensor src, XPtrTorchindex_int64_t dim, XPtrTorchoptional_int64_t start, XPtrTorchoptional_int64_t end, XPtrTorchint64_t step);
 RcppExport SEXP _torch_cpp_torch_method_slice_scatter_self_Tensor_src_Tensor(SEXP selfSEXP, SEXP srcSEXP, SEXP dimSEXP, SEXP startSEXP, SEXP endSEXP, SEXP stepSEXP) {
@@ -5640,13 +5689,13 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_method__nested_tensor_offsets_self_Tensor
-XPtrTorchIntArrayRef cpp_torch_method__nested_tensor_offsets_self_Tensor(XPtrTorchTensor self);
-RcppExport SEXP _torch_cpp_torch_method__nested_tensor_offsets_self_Tensor(SEXP selfSEXP) {
+// cpp_torch_method__nested_tensor_storage_offsets_self_Tensor
+XPtrTorchTensor cpp_torch_method__nested_tensor_storage_offsets_self_Tensor(XPtrTorchTensor self);
+RcppExport SEXP _torch_cpp_torch_method__nested_tensor_storage_offsets_self_Tensor(SEXP selfSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_method__nested_tensor_offsets_self_Tensor(self));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method__nested_tensor_storage_offsets_self_Tensor(self));
     return rcpp_result_gen;
 END_RCPP
 }
@@ -6134,25 +6183,39 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_method__sparse_mask_projection_self_Tensor_mask_Tensor
+XPtrTorchTensor cpp_torch_method__sparse_mask_projection_self_Tensor_mask_Tensor(XPtrTorchTensor self, XPtrTorchTensor mask, XPtrTorchbool accumulate_matches);
+RcppExport SEXP _torch_cpp_torch_method__sparse_mask_projection_self_Tensor_mask_Tensor(SEXP selfSEXP, SEXP maskSEXP, SEXP accumulate_matchesSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mask(maskSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type accumulate_matches(accumulate_matchesSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method__sparse_mask_projection_self_Tensor_mask_Tensor(self, mask, accumulate_matches));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_method_to_dense_self_Tensor
-XPtrTorchTensor cpp_torch_method_to_dense_self_Tensor(XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype);
-RcppExport SEXP _torch_cpp_torch_method_to_dense_self_Tensor(SEXP selfSEXP, SEXP dtypeSEXP) {
+XPtrTorchTensor cpp_torch_method_to_dense_self_Tensor(XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype, XPtrTorchoptional_bool masked_grad);
+RcppExport SEXP _torch_cpp_torch_method_to_dense_self_Tensor(SEXP selfSEXP, SEXP dtypeSEXP, SEXP masked_gradSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
     Rcpp::traits::input_parameter< XPtrTorchoptional_scalar_type >::type dtype(dtypeSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_method_to_dense_self_Tensor(self, dtype));
+    Rcpp::traits::input_parameter< XPtrTorchoptional_bool >::type masked_grad(masked_gradSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method_to_dense_self_Tensor(self, dtype, masked_grad));
     return rcpp_result_gen;
 END_RCPP
 }
 // cpp_torch_method__to_dense_self_Tensor
-XPtrTorchTensor cpp_torch_method__to_dense_self_Tensor(XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype);
-RcppExport SEXP _torch_cpp_torch_method__to_dense_self_Tensor(SEXP selfSEXP, SEXP dtypeSEXP) {
+XPtrTorchTensor cpp_torch_method__to_dense_self_Tensor(XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype, XPtrTorchoptional_bool masked_grad);
+RcppExport SEXP _torch_cpp_torch_method__to_dense_self_Tensor(SEXP selfSEXP, SEXP dtypeSEXP, SEXP masked_gradSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
     Rcpp::traits::input_parameter< XPtrTorchoptional_scalar_type >::type dtype(dtypeSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_method__to_dense_self_Tensor(self, dtype));
+    Rcpp::traits::input_parameter< XPtrTorchoptional_bool >::type masked_grad(masked_gradSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method__to_dense_self_Tensor(self, dtype, masked_grad));
     return rcpp_result_gen;
 END_RCPP
 }
@@ -6350,6 +6413,17 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_method__to_sparse_self_Tensor_sparse_dim_int64_t
+XPtrTorchTensor cpp_torch_method__to_sparse_self_Tensor_sparse_dim_int64_t(XPtrTorchTensor self, XPtrTorchint64_t sparse_dim);
+RcppExport SEXP _torch_cpp_torch_method__to_sparse_self_Tensor_sparse_dim_int64_t(SEXP selfSEXP, SEXP sparse_dimSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type sparse_dim(sparse_dimSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method__to_sparse_self_Tensor_sparse_dim_int64_t(self, sparse_dim));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_method_to_sparse_self_Tensor
 XPtrTorchTensor cpp_torch_method_to_sparse_self_Tensor(XPtrTorchTensor self, XPtrTorchLayout layout, XPtrTorchOptionalIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim);
 RcppExport SEXP _torch_cpp_torch_method_to_sparse_self_Tensor(SEXP selfSEXP, SEXP layoutSEXP, SEXP blocksizeSEXP, SEXP dense_dimSEXP) {
@@ -6363,6 +6437,19 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_method__to_sparse_self_Tensor
+XPtrTorchTensor cpp_torch_method__to_sparse_self_Tensor(XPtrTorchTensor self, XPtrTorchLayout layout, XPtrTorchOptionalIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim);
+RcppExport SEXP _torch_cpp_torch_method__to_sparse_self_Tensor(SEXP selfSEXP, SEXP layoutSEXP, SEXP blocksizeSEXP, SEXP dense_dimSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchLayout >::type layout(layoutSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalIntArrayRef >::type blocksize(blocksizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type dense_dim(dense_dimSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method__to_sparse_self_Tensor(self, layout, blocksize, dense_dim));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_method_to_sparse_csr_self_Tensor
 XPtrTorchTensor cpp_torch_method_to_sparse_csr_self_Tensor(XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim);
 RcppExport SEXP _torch_cpp_torch_method_to_sparse_csr_self_Tensor(SEXP selfSEXP, SEXP dense_dimSEXP) {
@@ -6374,6 +6461,17 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_method__to_sparse_csr_self_Tensor
+XPtrTorchTensor cpp_torch_method__to_sparse_csr_self_Tensor(XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim);
+RcppExport SEXP _torch_cpp_torch_method__to_sparse_csr_self_Tensor(SEXP selfSEXP, SEXP dense_dimSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type dense_dim(dense_dimSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method__to_sparse_csr_self_Tensor(self, dense_dim));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_method_to_sparse_csc_self_Tensor
 XPtrTorchTensor cpp_torch_method_to_sparse_csc_self_Tensor(XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim);
 RcppExport SEXP _torch_cpp_torch_method_to_sparse_csc_self_Tensor(SEXP selfSEXP, SEXP dense_dimSEXP) {
@@ -6385,6 +6483,17 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_method__to_sparse_csc_self_Tensor
+XPtrTorchTensor cpp_torch_method__to_sparse_csc_self_Tensor(XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim);
+RcppExport SEXP _torch_cpp_torch_method__to_sparse_csc_self_Tensor(SEXP selfSEXP, SEXP dense_dimSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type dense_dim(dense_dimSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method__to_sparse_csc_self_Tensor(self, dense_dim));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_method_to_sparse_bsr_self_Tensor_blocksize_IntArrayRef
 XPtrTorchTensor cpp_torch_method_to_sparse_bsr_self_Tensor_blocksize_IntArrayRef(XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim);
 RcppExport SEXP _torch_cpp_torch_method_to_sparse_bsr_self_Tensor_blocksize_IntArrayRef(SEXP selfSEXP, SEXP blocksizeSEXP, SEXP dense_dimSEXP) {
@@ -6397,6 +6506,18 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_method__to_sparse_bsr_self_Tensor_blocksize_IntArrayRef
+XPtrTorchTensor cpp_torch_method__to_sparse_bsr_self_Tensor_blocksize_IntArrayRef(XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim);
+RcppExport SEXP _torch_cpp_torch_method__to_sparse_bsr_self_Tensor_blocksize_IntArrayRef(SEXP selfSEXP, SEXP blocksizeSEXP, SEXP dense_dimSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type blocksize(blocksizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type dense_dim(dense_dimSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method__to_sparse_bsr_self_Tensor_blocksize_IntArrayRef(self, blocksize, dense_dim));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_method_to_sparse_bsc_self_Tensor_blocksize_IntArrayRef
 XPtrTorchTensor cpp_torch_method_to_sparse_bsc_self_Tensor_blocksize_IntArrayRef(XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim);
 RcppExport SEXP _torch_cpp_torch_method_to_sparse_bsc_self_Tensor_blocksize_IntArrayRef(SEXP selfSEXP, SEXP blocksizeSEXP, SEXP dense_dimSEXP) {
@@ -6409,6 +6530,18 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_method__to_sparse_bsc_self_Tensor_blocksize_IntArrayRef
+XPtrTorchTensor cpp_torch_method__to_sparse_bsc_self_Tensor_blocksize_IntArrayRef(XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim);
+RcppExport SEXP _torch_cpp_torch_method__to_sparse_bsc_self_Tensor_blocksize_IntArrayRef(SEXP selfSEXP, SEXP blocksizeSEXP, SEXP dense_dimSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type blocksize(blocksizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type dense_dim(dense_dimSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method__to_sparse_bsc_self_Tensor_blocksize_IntArrayRef(self, blocksize, dense_dim));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_method_to_mkldnn_self_Tensor
 XPtrTorchTensor cpp_torch_method_to_mkldnn_self_Tensor(XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype);
 RcppExport SEXP _torch_cpp_torch_method_to_mkldnn_self_Tensor(SEXP selfSEXP, SEXP dtypeSEXP) {
@@ -8393,6 +8526,18 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_method_nonzero_static_self_Tensor_size_int64_t
+XPtrTorchTensor cpp_torch_method_nonzero_static_self_Tensor_size_int64_t(XPtrTorchTensor self, XPtrTorchint64_t size, XPtrTorchint64_t fill_value);
+RcppExport SEXP _torch_cpp_torch_method_nonzero_static_self_Tensor_size_int64_t(SEXP selfSEXP, SEXP sizeSEXP, SEXP fill_valueSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type size(sizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type fill_value(fill_valueSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_method_nonzero_static_self_Tensor_size_int64_t(self, size, fill_value));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_method_nonzero_numpy_self_Tensor
 XPtrTorchTensorList cpp_torch_method_nonzero_numpy_self_Tensor(XPtrTorchTensor self);
 RcppExport SEXP _torch_cpp_torch_method_nonzero_numpy_self_Tensor(SEXP selfSEXP) {
@@ -9799,6 +9944,50 @@ BEGIN_RCPP
     return R_NilValue;
 END_RCPP
 }
+// cpp_torch_namespace__assert_async_self_Tensor_assert_msg_c10string_view
+void cpp_torch_namespace__assert_async_self_Tensor_assert_msg_c10string_view(XPtrTorchTensor self, XPtrTorchstring_view assert_msg);
+RcppExport SEXP _torch_cpp_torch_namespace__assert_async_self_Tensor_assert_msg_c10string_view(SEXP selfSEXP, SEXP assert_msgSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchstring_view >::type assert_msg(assert_msgSEXP);
+    cpp_torch_namespace__assert_async_self_Tensor_assert_msg_c10string_view(self, assert_msg);
+    return R_NilValue;
+END_RCPP
+}
+// cpp_torch_namespace__assert_scalar_self_Scalar_assert_msg_c10string_view
+void cpp_torch_namespace__assert_scalar_self_Scalar_assert_msg_c10string_view(XPtrTorchScalar self, XPtrTorchstring_view assert_msg);
+RcppExport SEXP _torch_cpp_torch_namespace__assert_scalar_self_Scalar_assert_msg_c10string_view(SEXP selfSEXP, SEXP assert_msgSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchstring_view >::type assert_msg(assert_msgSEXP);
+    cpp_torch_namespace__assert_scalar_self_Scalar_assert_msg_c10string_view(self, assert_msg);
+    return R_NilValue;
+END_RCPP
+}
+// cpp_torch_namespace__functional_assert_scalar_self_Scalar_assert_msg_c10string_view_dep_token_Tensor
+XPtrTorchTensor cpp_torch_namespace__functional_assert_scalar_self_Scalar_assert_msg_c10string_view_dep_token_Tensor(XPtrTorchScalar self, XPtrTorchstring_view assert_msg, XPtrTorchTensor dep_token);
+RcppExport SEXP _torch_cpp_torch_namespace__functional_assert_scalar_self_Scalar_assert_msg_c10string_view_dep_token_Tensor(SEXP selfSEXP, SEXP assert_msgSEXP, SEXP dep_tokenSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchstring_view >::type assert_msg(assert_msgSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type dep_token(dep_tokenSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__functional_assert_scalar_self_Scalar_assert_msg_c10string_view_dep_token_Tensor(self, assert_msg, dep_token));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__functional_assert_async_self_Tensor_assert_msg_c10string_view_dep_token_Tensor
+XPtrTorchTensor cpp_torch_namespace__functional_assert_async_self_Tensor_assert_msg_c10string_view_dep_token_Tensor(XPtrTorchTensor self, XPtrTorchstring_view assert_msg, XPtrTorchTensor dep_token);
+RcppExport SEXP _torch_cpp_torch_namespace__functional_assert_async_self_Tensor_assert_msg_c10string_view_dep_token_Tensor(SEXP selfSEXP, SEXP assert_msgSEXP, SEXP dep_tokenSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchstring_view >::type assert_msg(assert_msgSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type dep_token(dep_tokenSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__functional_assert_async_self_Tensor_assert_msg_c10string_view_dep_token_Tensor(self, assert_msg, dep_token));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace__assert_tensor_metadata_a_Tensor
 void cpp_torch_namespace__assert_tensor_metadata_a_Tensor(XPtrTorchTensor a, XPtrTorchOptionalIntArrayRef size, XPtrTorchOptionalIntArrayRef stride, XPtrTorchoptional_scalar_type dtype);
 RcppExport SEXP _torch_cpp_torch_namespace__assert_tensor_metadata_a_Tensor(SEXP aSEXP, SEXP sizeSEXP, SEXP strideSEXP, SEXP dtypeSEXP) {
@@ -9811,6 +10000,63 @@ BEGIN_RCPP
     return R_NilValue;
 END_RCPP
 }
+// cpp_torch_namespace__print_s_c10string_view
+void cpp_torch_namespace__print_s_c10string_view(XPtrTorchstring_view s);
+RcppExport SEXP _torch_cpp_torch_namespace__print_s_c10string_view(SEXP sSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchstring_view >::type s(sSEXP);
+    cpp_torch_namespace__print_s_c10string_view(s);
+    return R_NilValue;
+END_RCPP
+}
+// cpp_torch_namespace_sym_constrain_range_size_Scalar
+void cpp_torch_namespace_sym_constrain_range_size_Scalar(XPtrTorchScalar size, XPtrTorchoptional_int64_t min, XPtrTorchoptional_int64_t max);
+RcppExport SEXP _torch_cpp_torch_namespace_sym_constrain_range_size_Scalar(SEXP sizeSEXP, SEXP minSEXP, SEXP maxSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type size(sizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type min(minSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type max(maxSEXP);
+    cpp_torch_namespace_sym_constrain_range_size_Scalar(size, min, max);
+    return R_NilValue;
+END_RCPP
+}
+// cpp_torch_namespace_sym_constrain_range_for_size_size_Scalar
+void cpp_torch_namespace_sym_constrain_range_for_size_size_Scalar(XPtrTorchScalar size, XPtrTorchoptional_int64_t min, XPtrTorchoptional_int64_t max);
+RcppExport SEXP _torch_cpp_torch_namespace_sym_constrain_range_for_size_size_Scalar(SEXP sizeSEXP, SEXP minSEXP, SEXP maxSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type size(sizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type min(minSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type max(maxSEXP);
+    cpp_torch_namespace_sym_constrain_range_for_size_size_Scalar(size, min, max);
+    return R_NilValue;
+END_RCPP
+}
+// cpp_torch_namespace__functional_sym_constrain_range_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor
+XPtrTorchTensor cpp_torch_namespace__functional_sym_constrain_range_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor(XPtrTorchScalar size, XPtrTorchoptional_int64_t min, XPtrTorchoptional_int64_t max, XPtrTorchTensor dep_token);
+RcppExport SEXP _torch_cpp_torch_namespace__functional_sym_constrain_range_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor(SEXP sizeSEXP, SEXP minSEXP, SEXP maxSEXP, SEXP dep_tokenSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type size(sizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type min(minSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type max(maxSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type dep_token(dep_tokenSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__functional_sym_constrain_range_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor(size, min, max, dep_token));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__functional_sym_constrain_range_for_size_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor
+XPtrTorchTensor cpp_torch_namespace__functional_sym_constrain_range_for_size_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor(XPtrTorchScalar size, XPtrTorchoptional_int64_t min, XPtrTorchoptional_int64_t max, XPtrTorchTensor dep_token);
+RcppExport SEXP _torch_cpp_torch_namespace__functional_sym_constrain_range_for_size_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor(SEXP sizeSEXP, SEXP minSEXP, SEXP maxSEXP, SEXP dep_tokenSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type size(sizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type min(minSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type max(maxSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type dep_token(dep_tokenSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__functional_sym_constrain_range_for_size_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor(size, min, max, dep_token));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace__use_cudnn_ctc_loss_log_probs_Tensor_targets_Tensor_input_lengths_IntArrayRef_target_lengths_IntArrayRef_blank_int64_t
 XPtrTorchbool cpp_torch_namespace__use_cudnn_ctc_loss_log_probs_Tensor_targets_Tensor_input_lengths_IntArrayRef_target_lengths_IntArrayRef_blank_int64_t(XPtrTorchTensor log_probs, XPtrTorchTensor targets, XPtrTorchIntArrayRef input_lengths, XPtrTorchIntArrayRef target_lengths, XPtrTorchint64_t blank);
 RcppExport SEXP _torch_cpp_torch_namespace__use_cudnn_ctc_loss_log_probs_Tensor_targets_Tensor_input_lengths_IntArrayRef_target_lengths_IntArrayRef_blank_int64_t(SEXP log_probsSEXP, SEXP targetsSEXP, SEXP input_lengthsSEXP, SEXP target_lengthsSEXP, SEXP blankSEXP) {
@@ -10733,6 +10979,17 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__test_functorch_fallback_self_Tensor_other_Tensor
+XPtrTorchTensor cpp_torch_namespace__test_functorch_fallback_self_Tensor_other_Tensor(XPtrTorchTensor self, XPtrTorchTensor other);
+RcppExport SEXP _torch_cpp_torch_namespace__test_functorch_fallback_self_Tensor_other_Tensor(SEXP selfSEXP, SEXP otherSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type other(otherSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__test_functorch_fallback_self_Tensor_other_Tensor(self, other));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_all_self_Tensor_dim_int64_t
 XPtrTorchTensor cpp_torch_namespace_all_self_Tensor_dim_int64_t(XPtrTorchTensor self, XPtrTorchindex_int64_t dim, XPtrTorchbool keepdim);
 RcppExport SEXP _torch_cpp_torch_namespace_all_self_Tensor_dim_int64_t(SEXP selfSEXP, SEXP dimSEXP, SEXP keepdimSEXP) {
@@ -10745,6 +11002,18 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_all_self_Tensor_dim_IntArrayRef
+XPtrTorchTensor cpp_torch_namespace_all_self_Tensor_dim_IntArrayRef(XPtrTorchTensor self, XPtrTorchOptionalIndexIntArrayRef dim, XPtrTorchbool keepdim);
+RcppExport SEXP _torch_cpp_torch_namespace_all_self_Tensor_dim_IntArrayRef(SEXP selfSEXP, SEXP dimSEXP, SEXP keepdimSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalIndexIntArrayRef >::type dim(dimSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type keepdim(keepdimSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_all_self_Tensor_dim_IntArrayRef(self, dim, keepdim));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_int64_t
 XPtrTorchTensor cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_int64_t(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchindex_int64_t dim, XPtrTorchbool keepdim);
 RcppExport SEXP _torch_cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_int64_t(SEXP outSEXP, SEXP selfSEXP, SEXP dimSEXP, SEXP keepdimSEXP) {
@@ -10758,6 +11027,19 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_IntArrayRef
+XPtrTorchTensor cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_IntArrayRef(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexIntArrayRef dim, XPtrTorchbool keepdim);
+RcppExport SEXP _torch_cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_IntArrayRef(SEXP outSEXP, SEXP selfSEXP, SEXP dimSEXP, SEXP keepdimSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalIndexIntArrayRef >::type dim(dimSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type keepdim(keepdimSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_IntArrayRef(out, self, dim, keepdim));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_all_self_Tensor_dim_Dimname
 XPtrTorchTensor cpp_torch_namespace_all_self_Tensor_dim_Dimname(XPtrTorchTensor self, XPtrTorchDimname dim, XPtrTorchbool keepdim);
 RcppExport SEXP _torch_cpp_torch_namespace_all_self_Tensor_dim_Dimname(SEXP selfSEXP, SEXP dimSEXP, SEXP keepdimSEXP) {
@@ -10809,6 +11091,18 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_any_self_Tensor_dim_IntArrayRef
+XPtrTorchTensor cpp_torch_namespace_any_self_Tensor_dim_IntArrayRef(XPtrTorchTensor self, XPtrTorchOptionalIndexIntArrayRef dim, XPtrTorchbool keepdim);
+RcppExport SEXP _torch_cpp_torch_namespace_any_self_Tensor_dim_IntArrayRef(SEXP selfSEXP, SEXP dimSEXP, SEXP keepdimSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalIndexIntArrayRef >::type dim(dimSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type keepdim(keepdimSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_any_self_Tensor_dim_IntArrayRef(self, dim, keepdim));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_int64_t
 XPtrTorchTensor cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_int64_t(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchindex_int64_t dim, XPtrTorchbool keepdim);
 RcppExport SEXP _torch_cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_int64_t(SEXP outSEXP, SEXP selfSEXP, SEXP dimSEXP, SEXP keepdimSEXP) {
@@ -10822,6 +11116,19 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_IntArrayRef
+XPtrTorchTensor cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_IntArrayRef(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexIntArrayRef dim, XPtrTorchbool keepdim);
+RcppExport SEXP _torch_cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_IntArrayRef(SEXP outSEXP, SEXP selfSEXP, SEXP dimSEXP, SEXP keepdimSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalIndexIntArrayRef >::type dim(dimSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type keepdim(keepdimSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_IntArrayRef(out, self, dim, keepdim));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_any_self_Tensor_dim_Dimname
 XPtrTorchTensor cpp_torch_namespace_any_self_Tensor_dim_Dimname(XPtrTorchTensor self, XPtrTorchDimname dim, XPtrTorchbool keepdim);
 RcppExport SEXP _torch_cpp_torch_namespace_any_self_Tensor_dim_Dimname(SEXP selfSEXP, SEXP dimSEXP, SEXP keepdimSEXP) {
@@ -11675,6 +11982,16 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__lazy_clone_self_Tensor
+XPtrTorchTensor cpp_torch_namespace__lazy_clone_self_Tensor(XPtrTorchTensor self);
+RcppExport SEXP _torch_cpp_torch_namespace__lazy_clone_self_Tensor(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__lazy_clone_self_Tensor(self));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_logical_not_self_Tensor
 XPtrTorchTensor cpp_torch_namespace_logical_not_self_Tensor(XPtrTorchTensor self);
 RcppExport SEXP _torch_cpp_torch_namespace_logical_not_self_Tensor(SEXP selfSEXP) {
@@ -13022,6 +13339,25 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool
+XPtrTorchTensor cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor weight, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups, XPtrTorchbool benchmark, XPtrTorchbool deterministic, XPtrTorchbool allow_tf32);
+RcppExport SEXP _torch_cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool(SEXP outSEXP, SEXP selfSEXP, SEXP weightSEXP, SEXP paddingSEXP, SEXP strideSEXP, SEXP dilationSEXP, SEXP groupsSEXP, SEXP benchmarkSEXP, SEXP deterministicSEXP, SEXP allow_tf32SEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type weight(weightSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type padding(paddingSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type stride(strideSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type dilation(dilationSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type groups(groupsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type benchmark(benchmarkSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type deterministic(deterministicSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type allow_tf32(allow_tf32SEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool(out, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_cudnn_convolution_transpose_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool
 XPtrTorchTensor cpp_torch_namespace_cudnn_convolution_transpose_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool(XPtrTorchTensor self, XPtrTorchTensor weight, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef output_padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups, XPtrTorchbool benchmark, XPtrTorchbool deterministic, XPtrTorchbool allow_tf32);
 RcppExport SEXP _torch_cpp_torch_namespace_cudnn_convolution_transpose_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool(SEXP selfSEXP, SEXP weightSEXP, SEXP paddingSEXP, SEXP output_paddingSEXP, SEXP strideSEXP, SEXP dilationSEXP, SEXP groupsSEXP, SEXP benchmarkSEXP, SEXP deterministicSEXP, SEXP allow_tf32SEXP) {
@@ -14205,6 +14541,18 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_empty_permuted_size_IntArrayRef_physical_layout_IntArrayRef
+XPtrTorchTensor cpp_torch_namespace_empty_permuted_size_IntArrayRef_physical_layout_IntArrayRef(XPtrTorchIntArrayRef size, XPtrTorchIntArrayRef physical_layout, XPtrTorchTensorOptions options);
+RcppExport SEXP _torch_cpp_torch_namespace_empty_permuted_size_IntArrayRef_physical_layout_IntArrayRef(SEXP sizeSEXP, SEXP physical_layoutSEXP, SEXP optionsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type size(sizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type physical_layout(physical_layoutSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorOptions >::type options(optionsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_empty_permuted_size_IntArrayRef_physical_layout_IntArrayRef(size, physical_layout, options));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace__empty_affine_quantized_size_IntArrayRef
 XPtrTorchTensor cpp_torch_namespace__empty_affine_quantized_size_IntArrayRef(XPtrTorchIntArrayRef size, XPtrTorchTensorOptions options, XPtrTorchdouble scale, XPtrTorchint64_t zero_point, XPtrTorchoptional_memory_format memory_format);
 RcppExport SEXP _torch_cpp_torch_namespace__empty_affine_quantized_size_IntArrayRef(SEXP sizeSEXP, SEXP optionsSEXP, SEXP scaleSEXP, SEXP zero_pointSEXP, SEXP memory_formatSEXP) {
@@ -15213,65 +15561,63 @@ BEGIN_RCPP
     return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__cufft_get_plan_cache_size_device_index_int64_t
-XPtrTorchint64_t cpp_torch_namespace__cufft_get_plan_cache_size_device_index_int64_t(XPtrTorchint64_t device_index);
-RcppExport SEXP _torch_cpp_torch_namespace__cufft_get_plan_cache_size_device_index_int64_t(SEXP device_indexSEXP) {
+// cpp_torch_namespace_index_self_Tensor_indices_constc10ListstdoptionalTensor
+XPtrTorchTensor cpp_torch_namespace_index_self_Tensor_indices_constc10ListstdoptionalTensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices);
+RcppExport SEXP _torch_cpp_torch_namespace_index_self_Tensor_indices_constc10ListstdoptionalTensor(SEXP selfSEXP, SEXP indicesSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
-    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type device_index(device_indexSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__cufft_get_plan_cache_size_device_index_int64_t(device_index));
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalIndexTensorList >::type indices(indicesSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_index_self_Tensor_indices_constc10ListstdoptionalTensor(self, indices));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__cufft_get_plan_cache_max_size_device_index_int64_t
-XPtrTorchint64_t cpp_torch_namespace__cufft_get_plan_cache_max_size_device_index_int64_t(XPtrTorchint64_t device_index);
-RcppExport SEXP _torch_cpp_torch_namespace__cufft_get_plan_cache_max_size_device_index_int64_t(SEXP device_indexSEXP) {
+// cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor
+XPtrTorchTensor cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices);
+RcppExport SEXP _torch_cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor(SEXP outSEXP, SEXP selfSEXP, SEXP indicesSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
-    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type device_index(device_indexSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__cufft_get_plan_cache_max_size_device_index_int64_t(device_index));
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalIndexTensorList >::type indices(indicesSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor(out, self, indices));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__cufft_set_plan_cache_max_size_device_index_int64_t_max_size_int64_t
-void cpp_torch_namespace__cufft_set_plan_cache_max_size_device_index_int64_t_max_size_int64_t(XPtrTorchint64_t device_index, XPtrTorchint64_t max_size);
-RcppExport SEXP _torch_cpp_torch_namespace__cufft_set_plan_cache_max_size_device_index_int64_t_max_size_int64_t(SEXP device_indexSEXP, SEXP max_sizeSEXP) {
+// cpp_torch_namespace__unsafe_index_self_Tensor_indices_constc10ListstdoptionalTensor
+XPtrTorchTensor cpp_torch_namespace__unsafe_index_self_Tensor_indices_constc10ListstdoptionalTensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices);
+RcppExport SEXP _torch_cpp_torch_namespace__unsafe_index_self_Tensor_indices_constc10ListstdoptionalTensor(SEXP selfSEXP, SEXP indicesSEXP) {
 BEGIN_RCPP
-    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type device_index(device_indexSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type max_size(max_sizeSEXP);
-    cpp_torch_namespace__cufft_set_plan_cache_max_size_device_index_int64_t_max_size_int64_t(device_index, max_size);
-    return R_NilValue;
-END_RCPP
-}
-// cpp_torch_namespace__cufft_clear_plan_cache_device_index_int64_t
-void cpp_torch_namespace__cufft_clear_plan_cache_device_index_int64_t(XPtrTorchint64_t device_index);
-RcppExport SEXP _torch_cpp_torch_namespace__cufft_clear_plan_cache_device_index_int64_t(SEXP device_indexSEXP) {
-BEGIN_RCPP
-    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type device_index(device_indexSEXP);
-    cpp_torch_namespace__cufft_clear_plan_cache_device_index_int64_t(device_index);
-    return R_NilValue;
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalIndexTensorList >::type indices(indicesSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__unsafe_index_self_Tensor_indices_constc10ListstdoptionalTensor(self, indices));
+    return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace_index_self_Tensor_indices_constc10Listc10optionalTensor
-XPtrTorchTensor cpp_torch_namespace_index_self_Tensor_indices_constc10Listc10optionalTensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices);
-RcppExport SEXP _torch_cpp_torch_namespace_index_self_Tensor_indices_constc10Listc10optionalTensor(SEXP selfSEXP, SEXP indicesSEXP) {
+// cpp_torch_namespace__unsafe_masked_index_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_fill_Scalar
+XPtrTorchTensor cpp_torch_namespace__unsafe_masked_index_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_fill_Scalar(XPtrTorchTensor self, XPtrTorchTensor mask, XPtrTorchOptionalIndexTensorList indices, XPtrTorchScalar fill);
+RcppExport SEXP _torch_cpp_torch_namespace__unsafe_masked_index_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_fill_Scalar(SEXP selfSEXP, SEXP maskSEXP, SEXP indicesSEXP, SEXP fillSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mask(maskSEXP);
     Rcpp::traits::input_parameter< XPtrTorchOptionalIndexTensorList >::type indices(indicesSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_index_self_Tensor_indices_constc10Listc10optionalTensor(self, indices));
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type fill(fillSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__unsafe_masked_index_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_fill_Scalar(self, mask, indices, fill));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor
-XPtrTorchTensor cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices);
-RcppExport SEXP _torch_cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor(SEXP outSEXP, SEXP selfSEXP, SEXP indicesSEXP) {
+// cpp_torch_namespace__unsafe_masked_index_put_accumulate_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor
+XPtrTorchTensor cpp_torch_namespace__unsafe_masked_index_put_accumulate_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(XPtrTorchTensor self, XPtrTorchTensor mask, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values);
+RcppExport SEXP _torch_cpp_torch_namespace__unsafe_masked_index_put_accumulate_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(SEXP selfSEXP, SEXP maskSEXP, SEXP indicesSEXP, SEXP valuesSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
-    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mask(maskSEXP);
     Rcpp::traits::input_parameter< XPtrTorchOptionalIndexTensorList >::type indices(indicesSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor(out, self, indices));
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__unsafe_masked_index_put_accumulate_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(self, mask, indices, values));
     return rcpp_result_gen;
 END_RCPP
 }
@@ -15315,35 +15661,48 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor
-XPtrTorchTensor cpp_torch_namespace_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate);
-RcppExport SEXP _torch_cpp_torch_namespace_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP) {
+// cpp_torch_namespace_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor
+XPtrTorchTensor cpp_torch_namespace_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate);
+RcppExport SEXP _torch_cpp_torch_namespace_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalIndexTensorList >::type indices(indicesSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type accumulate(accumulateSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(self, indices, values, accumulate));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor
+XPtrTorchTensor cpp_torch_namespace_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate);
+RcppExport SEXP _torch_cpp_torch_namespace_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
     Rcpp::traits::input_parameter< XPtrTorchOptionalIndexTensorList >::type indices(indicesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchbool >::type accumulate(accumulateSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(self, indices, values, accumulate));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(self, indices, values, accumulate));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor
-XPtrTorchTensor cpp_torch_namespace_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate);
-RcppExport SEXP _torch_cpp_torch_namespace_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP) {
+// cpp_torch_namespace__unsafe_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor
+XPtrTorchTensor cpp_torch_namespace__unsafe_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate);
+RcppExport SEXP _torch_cpp_torch_namespace__unsafe_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
     Rcpp::traits::input_parameter< XPtrTorchOptionalIndexTensorList >::type indices(indicesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP);
    Rcpp::traits::input_parameter< XPtrTorchbool >::type accumulate(accumulateSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(self, indices, values, accumulate));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__unsafe_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(self, indices, values, accumulate));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor
-XPtrTorchTensor cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate, XPtrTorchbool unsafe);
-RcppExport SEXP _torch_cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP, SEXP unsafeSEXP) {
+// cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor
+XPtrTorchTensor cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate, XPtrTorchbool unsafe);
+RcppExport SEXP _torch_cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP, SEXP unsafeSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
@@ -15351,7 +15710,7 @@ BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchbool >::type accumulate(accumulateSEXP);
     Rcpp::traits::input_parameter< XPtrTorchbool >::type unsafe(unsafeSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(self, indices, values, accumulate, unsafe));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(self, indices, values, accumulate, unsafe));
     return rcpp_result_gen;
 END_RCPP
 }
@@ -15727,6 +16086,19 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_rms_norm_input_Tensor_normalized_shape_IntArrayRef
+XPtrTorchTensor cpp_torch_namespace_rms_norm_input_Tensor_normalized_shape_IntArrayRef(XPtrTorchTensor input, XPtrTorchIntArrayRef normalized_shape, XPtrTorchOptionalTensor weight, XPtrTorchOptionaldouble eps);
+RcppExport SEXP _torch_cpp_torch_namespace_rms_norm_input_Tensor_normalized_shape_IntArrayRef(SEXP inputSEXP, SEXP normalized_shapeSEXP, SEXP weightSEXP, SEXP epsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type normalized_shape(normalized_shapeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type weight(weightSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type eps(epsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_rms_norm_input_Tensor_normalized_shape_IntArrayRef(input, normalized_shape, weight, eps));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_nan_to_num_self_Tensor
 XPtrTorchTensor cpp_torch_namespace_nan_to_num_self_Tensor(XPtrTorchTensor self, XPtrTorchOptionaldouble nan, XPtrTorchOptionaldouble posinf, XPtrTorchOptionaldouble neginf);
 RcppExport SEXP _torch_cpp_torch_namespace_nan_to_num_self_Tensor(SEXP selfSEXP, SEXP nanSEXP, SEXP posinfSEXP, SEXP neginfSEXP) {
@@ -15855,6 +16227,139 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__cslt_compress_input_Tensor
+XPtrTorchTensor cpp_torch_namespace__cslt_compress_input_Tensor(XPtrTorchTensor input);
+RcppExport SEXP _torch_cpp_torch_namespace__cslt_compress_input_Tensor(SEXP inputSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__cslt_compress_input_Tensor(input));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__cslt_sparse_mm_compressed_A_Tensor_dense_B_Tensor
+XPtrTorchTensor cpp_torch_namespace__cslt_sparse_mm_compressed_A_Tensor_dense_B_Tensor(XPtrTorchTensor compressed_A, XPtrTorchTensor dense_B, XPtrTorchOptionalTensor bias, XPtrTorchOptionalTensor alpha, XPtrTorchoptional_scalar_type out_dtype, XPtrTorchbool transpose_result, XPtrTorchint64_t alg_id);
+RcppExport SEXP _torch_cpp_torch_namespace__cslt_sparse_mm_compressed_A_Tensor_dense_B_Tensor(SEXP compressed_ASEXP, SEXP dense_BSEXP, SEXP biasSEXP, SEXP alphaSEXP, SEXP out_dtypeSEXP, SEXP transpose_resultSEXP, SEXP alg_idSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type compressed_A(compressed_ASEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type dense_B(dense_BSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type alpha(alphaSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_scalar_type >::type out_dtype(out_dtypeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type transpose_result(transpose_resultSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type alg_id(alg_idSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__cslt_sparse_mm_compressed_A_Tensor_dense_B_Tensor(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result, alg_id));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__cslt_sparse_mm_search_compressed_A_Tensor_dense_B_Tensor
+XPtrTorchint64_t cpp_torch_namespace__cslt_sparse_mm_search_compressed_A_Tensor_dense_B_Tensor(XPtrTorchTensor compressed_A, XPtrTorchTensor dense_B, XPtrTorchOptionalTensor bias, XPtrTorchOptionalTensor alpha, XPtrTorchoptional_scalar_type out_dtype, XPtrTorchbool transpose_result);
+RcppExport SEXP _torch_cpp_torch_namespace__cslt_sparse_mm_search_compressed_A_Tensor_dense_B_Tensor(SEXP compressed_ASEXP, SEXP dense_BSEXP, SEXP biasSEXP, SEXP alphaSEXP, SEXP out_dtypeSEXP, SEXP transpose_resultSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type compressed_A(compressed_ASEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type dense_B(dense_BSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type alpha(alphaSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_scalar_type >::type out_dtype(out_dtypeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type transpose_result(transpose_resultSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__cslt_sparse_mm_search_compressed_A_Tensor_dense_B_Tensor(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__sparse_semi_structured_tile_input_Tensor
+Rcpp::List cpp_torch_namespace__sparse_semi_structured_tile_input_Tensor(XPtrTorchTensor input, XPtrTorchstring_view algorithm, XPtrTorchbool use_cutlass);
+RcppExport SEXP _torch_cpp_torch_namespace__sparse_semi_structured_tile_input_Tensor(SEXP inputSEXP, SEXP algorithmSEXP, SEXP use_cutlassSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchstring_view >::type algorithm(algorithmSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type use_cutlass(use_cutlassSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__sparse_semi_structured_tile_input_Tensor(input, algorithm, use_cutlass));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__sparse_semi_structured_apply_input_Tensor_thread_masks_Tensor
+Rcpp::List cpp_torch_namespace__sparse_semi_structured_apply_input_Tensor_thread_masks_Tensor(XPtrTorchTensor input, XPtrTorchTensor thread_masks);
+RcppExport SEXP _torch_cpp_torch_namespace__sparse_semi_structured_apply_input_Tensor_thread_masks_Tensor(SEXP inputSEXP, SEXP thread_masksSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type thread_masks(thread_masksSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__sparse_semi_structured_apply_input_Tensor_thread_masks_Tensor(input, thread_masks));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__sparse_semi_structured_apply_dense_input_Tensor_thread_masks_Tensor
+XPtrTorchTensor cpp_torch_namespace__sparse_semi_structured_apply_dense_input_Tensor_thread_masks_Tensor(XPtrTorchTensor input, XPtrTorchTensor thread_masks);
+RcppExport SEXP _torch_cpp_torch_namespace__sparse_semi_structured_apply_dense_input_Tensor_thread_masks_Tensor(SEXP inputSEXP, SEXP thread_masksSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type thread_masks(thread_masksSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__sparse_semi_structured_apply_dense_input_Tensor_thread_masks_Tensor(input, thread_masks));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__sparse_semi_structured_linear_input_Tensor_weight_Tensor_meta_Tensor
+XPtrTorchTensor cpp_torch_namespace__sparse_semi_structured_linear_input_Tensor_weight_Tensor_meta_Tensor(XPtrTorchTensor input, XPtrTorchTensor weight, XPtrTorchTensor meta, XPtrTorchOptionalTensor bias, XPtrTorchoptional_string_view activation, XPtrTorchoptional_scalar_type out_dtype);
+RcppExport SEXP _torch_cpp_torch_namespace__sparse_semi_structured_linear_input_Tensor_weight_Tensor_meta_Tensor(SEXP inputSEXP, SEXP weightSEXP, SEXP metaSEXP, SEXP biasSEXP, SEXP activationSEXP, SEXP out_dtypeSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type weight(weightSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type meta(metaSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_string_view >::type activation(activationSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_scalar_type >::type out_dtype(out_dtypeSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__sparse_semi_structured_linear_input_Tensor_weight_Tensor_meta_Tensor(input, weight, meta, bias, activation, out_dtype));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__sparse_semi_structured_mm_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor
+XPtrTorchTensor cpp_torch_namespace__sparse_semi_structured_mm_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor(XPtrTorchTensor mat1, XPtrTorchTensor mat1_meta, XPtrTorchTensor mat2, XPtrTorchoptional_scalar_type out_dtype);
+RcppExport SEXP _torch_cpp_torch_namespace__sparse_semi_structured_mm_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor(SEXP mat1SEXP, SEXP mat1_metaSEXP, SEXP mat2SEXP, SEXP out_dtypeSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mat1(mat1SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mat1_meta(mat1_metaSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mat2(mat2SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_scalar_type >::type out_dtype(out_dtypeSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__sparse_semi_structured_mm_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor(mat1, mat1_meta, mat2, out_dtype));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__sparse_semi_structured_addmm_input_Tensor_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor
+XPtrTorchTensor cpp_torch_namespace__sparse_semi_structured_addmm_input_Tensor_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor(XPtrTorchTensor input, XPtrTorchTensor mat1, XPtrTorchTensor mat1_meta, XPtrTorchTensor mat2, XPtrTorchScalar alpha, XPtrTorchScalar beta, XPtrTorchoptional_scalar_type out_dtype);
+RcppExport SEXP _torch_cpp_torch_namespace__sparse_semi_structured_addmm_input_Tensor_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor(SEXP inputSEXP, SEXP mat1SEXP, SEXP mat1_metaSEXP, SEXP mat2SEXP, SEXP alphaSEXP, SEXP betaSEXP, SEXP out_dtypeSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mat1(mat1SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mat1_meta(mat1_metaSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mat2(mat2SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type alpha(alphaSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type beta(betaSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_scalar_type >::type out_dtype(out_dtypeSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__sparse_semi_structured_addmm_input_Tensor_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor(input, mat1, mat1_meta, mat2, alpha, beta, out_dtype));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__mixed_dtypes_linear_input_Tensor_weight_Tensor_scale_Tensor
+XPtrTorchTensor cpp_torch_namespace__mixed_dtypes_linear_input_Tensor_weight_Tensor_scale_Tensor(XPtrTorchTensor input, XPtrTorchTensor weight, XPtrTorchTensor scale, XPtrTorchOptionalTensor bias, XPtrTorchoptional_string_view activation);
+RcppExport SEXP _torch_cpp_torch_namespace__mixed_dtypes_linear_input_Tensor_weight_Tensor_scale_Tensor(SEXP inputSEXP, SEXP weightSEXP, SEXP scaleSEXP, SEXP biasSEXP, SEXP activationSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type weight(weightSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type scale(scaleSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_string_view >::type activation(activationSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__mixed_dtypes_linear_input_Tensor_weight_Tensor_scale_Tensor(input, weight, scale, bias, activation));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_fbgemm_linear_int8_weight_fp32_activation_input_Tensor_weight_Tensor_packed_Tensor_col_offsets_Tensor_weight_scale_Scalar_weight_zero_point_Scalar_bias_Tensor
 XPtrTorchTensor cpp_torch_namespace_fbgemm_linear_int8_weight_fp32_activation_input_Tensor_weight_Tensor_packed_Tensor_col_offsets_Tensor_weight_scale_Scalar_weight_zero_point_Scalar_bias_Tensor(XPtrTorchTensor input, XPtrTorchTensor weight, XPtrTorchTensor packed, XPtrTorchTensor col_offsets, XPtrTorchScalar weight_scale, XPtrTorchScalar weight_zero_point, XPtrTorchTensor bias);
 RcppExport SEXP _torch_cpp_torch_namespace_fbgemm_linear_int8_weight_fp32_activation_input_Tensor_weight_Tensor_packed_Tensor_col_offsets_Tensor_weight_scale_Scalar_weight_zero_point_Scalar_bias_Tensor(SEXP inputSEXP, SEXP weightSEXP, SEXP packedSEXP, SEXP col_offsetsSEXP, SEXP weight_scaleSEXP, SEXP weight_zero_pointSEXP, SEXP biasSEXP) {
@@ -15907,6 +16412,35 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__wrapped_linear_prepack_weight_Tensor_weight_scale_Tensor_weight_zero_point_Tensor_bias_Tensor
+XPtrTorchTensor cpp_torch_namespace__wrapped_linear_prepack_weight_Tensor_weight_scale_Tensor_weight_zero_point_Tensor_bias_Tensor(XPtrTorchTensor weight, XPtrTorchTensor weight_scale, XPtrTorchTensor weight_zero_point, XPtrTorchTensor bias);
+RcppExport SEXP _torch_cpp_torch_namespace__wrapped_linear_prepack_weight_Tensor_weight_scale_Tensor_weight_zero_point_Tensor_bias_Tensor(SEXP weightSEXP, SEXP weight_scaleSEXP, SEXP weight_zero_pointSEXP, SEXP biasSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type weight(weightSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type weight_scale(weight_scaleSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type weight_zero_point(weight_zero_pointSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type bias(biasSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__wrapped_linear_prepack_weight_Tensor_weight_scale_Tensor_weight_zero_point_Tensor_bias_Tensor(weight, weight_scale, weight_zero_point, bias));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__wrapped_quantized_linear_prepacked_input_Tensor_input_scale_Tensor_input_zero_point_Tensor_packed_weight_Tensor_output_scale_Tensor_output_zero_point_Tensor_out_channel_int64_t
+XPtrTorchTensor cpp_torch_namespace__wrapped_quantized_linear_prepacked_input_Tensor_input_scale_Tensor_input_zero_point_Tensor_packed_weight_Tensor_output_scale_Tensor_output_zero_point_Tensor_out_channel_int64_t(XPtrTorchTensor input, XPtrTorchTensor input_scale, XPtrTorchTensor input_zero_point, XPtrTorchTensor packed_weight, XPtrTorchTensor output_scale, XPtrTorchTensor output_zero_point, XPtrTorchint64_t out_channel);
+RcppExport SEXP _torch_cpp_torch_namespace__wrapped_quantized_linear_prepacked_input_Tensor_input_scale_Tensor_input_zero_point_Tensor_packed_weight_Tensor_output_scale_Tensor_output_zero_point_Tensor_out_channel_int64_t(SEXP inputSEXP, SEXP input_scaleSEXP, SEXP input_zero_pointSEXP, SEXP packed_weightSEXP, SEXP output_scaleSEXP, SEXP output_zero_pointSEXP, SEXP out_channelSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input_scale(input_scaleSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input_zero_point(input_zero_pointSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type packed_weight(packed_weightSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type output_scale(output_scaleSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type output_zero_point(output_zero_pointSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type out_channel(out_channelSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__wrapped_quantized_linear_prepacked_input_Tensor_input_scale_Tensor_input_zero_point_Tensor_packed_weight_Tensor_output_scale_Tensor_output_zero_point_Tensor_out_channel_int64_t(input, input_scale, input_zero_point, packed_weight, output_scale, output_zero_point, out_channel));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_fbgemm_linear_fp16_weight_fp32_activation_input_Tensor_packed_weight_Tensor_bias_Tensor
 XPtrTorchTensor cpp_torch_namespace_fbgemm_linear_fp16_weight_fp32_activation_input_Tensor_packed_weight_Tensor_bias_Tensor(XPtrTorchTensor input, XPtrTorchTensor packed_weight, XPtrTorchTensor bias);
 RcppExport SEXP _torch_cpp_torch_namespace_fbgemm_linear_fp16_weight_fp32_activation_input_Tensor_packed_weight_Tensor_bias_Tensor(SEXP inputSEXP, SEXP packed_weightSEXP, SEXP biasSEXP) {
@@ -16000,6 +16534,45 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_linspace_start_Tensor_end_Tensor_steps_int64_t
+XPtrTorchTensor cpp_torch_namespace_linspace_start_Tensor_end_Tensor_steps_int64_t(XPtrTorchTensor start, XPtrTorchTensor end, XPtrTorchint64_t steps, XPtrTorchTensorOptions options);
+RcppExport SEXP _torch_cpp_torch_namespace_linspace_start_Tensor_end_Tensor_steps_int64_t(SEXP startSEXP, SEXP endSEXP, SEXP stepsSEXP, SEXP optionsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type start(startSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type end(endSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type steps(stepsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorOptions >::type options(optionsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_linspace_start_Tensor_end_Tensor_steps_int64_t(start, end, steps, options));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace_linspace_start_Tensor_end_Scalar_steps_int64_t
+XPtrTorchTensor cpp_torch_namespace_linspace_start_Tensor_end_Scalar_steps_int64_t(XPtrTorchTensor start, XPtrTorchScalar end, XPtrTorchint64_t steps, XPtrTorchTensorOptions options);
+RcppExport SEXP _torch_cpp_torch_namespace_linspace_start_Tensor_end_Scalar_steps_int64_t(SEXP startSEXP, SEXP endSEXP, SEXP stepsSEXP, SEXP optionsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type start(startSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type end(endSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type steps(stepsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorOptions >::type options(optionsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_linspace_start_Tensor_end_Scalar_steps_int64_t(start, end, steps, options));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace_linspace_start_Scalar_end_Tensor_steps_int64_t
+XPtrTorchTensor cpp_torch_namespace_linspace_start_Scalar_end_Tensor_steps_int64_t(XPtrTorchScalar start, XPtrTorchTensor end, XPtrTorchint64_t steps, XPtrTorchTensorOptions options);
+RcppExport SEXP _torch_cpp_torch_namespace_linspace_start_Scalar_end_Tensor_steps_int64_t(SEXP startSEXP, SEXP endSEXP, SEXP stepsSEXP, SEXP optionsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type start(startSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type end(endSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type steps(stepsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorOptions >::type options(optionsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_linspace_start_Scalar_end_Tensor_steps_int64_t(start, end, steps, options));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t
 XPtrTorchTensor cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t(XPtrTorchTensor out, XPtrTorchScalar start, XPtrTorchScalar end, XPtrTorchint64_t steps);
 RcppExport SEXP _torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t(SEXP outSEXP, SEXP startSEXP, SEXP endSEXP, SEXP stepsSEXP) {
@@ -16013,6 +16586,45 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t
+XPtrTorchTensor cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t(XPtrTorchTensor out, XPtrTorchTensor start, XPtrTorchTensor end, XPtrTorchint64_t steps);
+RcppExport SEXP _torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t(SEXP outSEXP, SEXP startSEXP, SEXP endSEXP, SEXP stepsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type start(startSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type end(endSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type steps(stepsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t(out, start, end, steps));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t
+XPtrTorchTensor cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t(XPtrTorchTensor out, XPtrTorchTensor start, XPtrTorchScalar end, XPtrTorchint64_t steps);
+RcppExport SEXP _torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t(SEXP outSEXP, SEXP startSEXP, SEXP endSEXP, SEXP stepsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type start(startSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type end(endSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type steps(stepsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t(out, start, end, steps));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t
+XPtrTorchTensor cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t(XPtrTorchTensor out, XPtrTorchScalar start, XPtrTorchTensor end, XPtrTorchint64_t steps);
+RcppExport SEXP _torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t(SEXP outSEXP, SEXP startSEXP, SEXP endSEXP, SEXP stepsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type start(startSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type end(endSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type steps(stepsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t(out, start, end, steps));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_log_self_Tensor
 XPtrTorchTensor cpp_torch_namespace_log_self_Tensor(XPtrTorchTensor self);
 RcppExport SEXP _torch_cpp_torch_namespace_log_self_Tensor(SEXP selfSEXP) {
@@ -16288,6 +16900,48 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_logspace_start_Tensor_end_Tensor_steps_int64_t
+XPtrTorchTensor cpp_torch_namespace_logspace_start_Tensor_end_Tensor_steps_int64_t(XPtrTorchTensor start, XPtrTorchTensor end, XPtrTorchint64_t steps, XPtrTorchdouble base, XPtrTorchTensorOptions options);
+RcppExport SEXP _torch_cpp_torch_namespace_logspace_start_Tensor_end_Tensor_steps_int64_t(SEXP startSEXP, SEXP endSEXP, SEXP stepsSEXP, SEXP baseSEXP, SEXP optionsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type start(startSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type end(endSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type steps(stepsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type base(baseSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorOptions >::type options(optionsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_logspace_start_Tensor_end_Tensor_steps_int64_t(start, end, steps, base, options));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace_logspace_start_Tensor_end_Scalar_steps_int64_t
+XPtrTorchTensor cpp_torch_namespace_logspace_start_Tensor_end_Scalar_steps_int64_t(XPtrTorchTensor start, XPtrTorchScalar end, XPtrTorchint64_t steps, XPtrTorchdouble base, XPtrTorchTensorOptions options);
+RcppExport SEXP _torch_cpp_torch_namespace_logspace_start_Tensor_end_Scalar_steps_int64_t(SEXP startSEXP, SEXP endSEXP, SEXP stepsSEXP, SEXP baseSEXP, SEXP optionsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type start(startSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type end(endSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type steps(stepsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type base(baseSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorOptions >::type options(optionsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_logspace_start_Tensor_end_Scalar_steps_int64_t(start, end, steps, base, options));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace_logspace_start_Scalar_end_Tensor_steps_int64_t
+XPtrTorchTensor cpp_torch_namespace_logspace_start_Scalar_end_Tensor_steps_int64_t(XPtrTorchScalar start, XPtrTorchTensor end, XPtrTorchint64_t steps, XPtrTorchdouble base, XPtrTorchTensorOptions options);
+RcppExport SEXP _torch_cpp_torch_namespace_logspace_start_Scalar_end_Tensor_steps_int64_t(SEXP startSEXP, SEXP endSEXP, SEXP stepsSEXP, SEXP baseSEXP, SEXP optionsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type start(startSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type end(endSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type steps(stepsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type base(baseSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorOptions >::type options(optionsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_logspace_start_Scalar_end_Tensor_steps_int64_t(start, end, steps, base, options));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t
 XPtrTorchTensor cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t(XPtrTorchTensor out, XPtrTorchScalar start, XPtrTorchScalar end, XPtrTorchint64_t steps, XPtrTorchdouble base);
 RcppExport SEXP _torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t(SEXP outSEXP, SEXP startSEXP, SEXP endSEXP, SEXP stepsSEXP, SEXP baseSEXP) {
@@ -16302,6 +16956,48 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t
+XPtrTorchTensor cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t(XPtrTorchTensor out, XPtrTorchTensor start, XPtrTorchTensor end, XPtrTorchint64_t steps, XPtrTorchdouble base);
+RcppExport SEXP _torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t(SEXP outSEXP, SEXP startSEXP, SEXP endSEXP, SEXP stepsSEXP, SEXP baseSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type start(startSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type end(endSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type steps(stepsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type base(baseSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t(out, start, end, steps, base));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t
+XPtrTorchTensor cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t(XPtrTorchTensor out, XPtrTorchTensor start, XPtrTorchScalar end, XPtrTorchint64_t steps, XPtrTorchdouble base);
+RcppExport SEXP _torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t(SEXP outSEXP, SEXP startSEXP, SEXP endSEXP, SEXP stepsSEXP, SEXP baseSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type start(startSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type end(endSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type steps(stepsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type base(baseSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t(out, start, end, steps, base));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t
+XPtrTorchTensor cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t(XPtrTorchTensor out, XPtrTorchScalar start, XPtrTorchTensor end, XPtrTorchint64_t steps, XPtrTorchdouble base);
+RcppExport SEXP _torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t(SEXP outSEXP, SEXP startSEXP, SEXP endSEXP, SEXP stepsSEXP, SEXP baseSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type start(startSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type end(endSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type steps(stepsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type base(baseSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t(out, start, end, steps, base));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_log_softmax_self_Tensor_dim_int64_t
 XPtrTorchTensor cpp_torch_namespace_log_softmax_self_Tensor_dim_int64_t(XPtrTorchTensor self, XPtrTorchindex_int64_t dim, XPtrTorchoptional_scalar_type dtype);
 RcppExport SEXP _torch_cpp_torch_namespace_log_softmax_self_Tensor_dim_int64_t(SEXP selfSEXP, SEXP dimSEXP, SEXP dtypeSEXP) {
@@ -16921,6 +17617,21 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_quantized_max_pool3d_self_Tensor_kernel_size_IntArrayRef
+XPtrTorchTensor cpp_torch_namespace_quantized_max_pool3d_self_Tensor_kernel_size_IntArrayRef(XPtrTorchTensor self, XPtrTorchIntArrayRef kernel_size, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef dilation, XPtrTorchbool ceil_mode);
+RcppExport SEXP _torch_cpp_torch_namespace_quantized_max_pool3d_self_Tensor_kernel_size_IntArrayRef(SEXP selfSEXP, SEXP kernel_sizeSEXP, SEXP strideSEXP, SEXP paddingSEXP, SEXP dilationSEXP, SEXP ceil_modeSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type kernel_size(kernel_sizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type stride(strideSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type padding(paddingSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type dilation(dilationSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type ceil_mode(ceil_modeSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_quantized_max_pool3d_self_Tensor_kernel_size_IntArrayRef(self, kernel_size, stride, padding, dilation, ceil_mode));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_max_pool3d_self_Tensor_kernel_size_IntArrayRef
 XPtrTorchTensor cpp_torch_namespace_max_pool3d_self_Tensor_kernel_size_IntArrayRef(XPtrTorchTensor self, XPtrTorchIntArrayRef kernel_size, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef dilation, XPtrTorchbool ceil_mode);
 RcppExport SEXP _torch_cpp_torch_namespace_max_pool3d_self_Tensor_kernel_size_IntArrayRef(SEXP selfSEXP, SEXP kernel_sizeSEXP, SEXP strideSEXP, SEXP paddingSEXP, SEXP dilationSEXP, SEXP ceil_modeSEXP) {
@@ -16947,6 +17658,18 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_mean_out_out_Tensor_self_Tensor
+XPtrTorchTensor cpp_torch_namespace_mean_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype);
+RcppExport SEXP _torch_cpp_torch_namespace_mean_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP dtypeSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_scalar_type >::type dtype(dtypeSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_mean_out_out_Tensor_self_Tensor(out, self, dtype));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_mean_self_Tensor_dim_IntArrayRef
 XPtrTorchTensor cpp_torch_namespace_mean_self_Tensor_dim_IntArrayRef(XPtrTorchTensor self, XPtrTorchOptionalIndexIntArrayRef dim, XPtrTorchbool keepdim, XPtrTorchoptional_scalar_type dtype);
 RcppExport SEXP _torch_cpp_torch_namespace_mean_self_Tensor_dim_IntArrayRef(SEXP selfSEXP, SEXP dimSEXP, SEXP keepdimSEXP, SEXP dtypeSEXP) {
@@ -17534,6 +18257,65 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__int_mm_self_Tensor_mat2_Tensor
+XPtrTorchTensor cpp_torch_namespace__int_mm_self_Tensor_mat2_Tensor(XPtrTorchTensor self, XPtrTorchTensor mat2);
+RcppExport SEXP _torch_cpp_torch_namespace__int_mm_self_Tensor_mat2_Tensor(SEXP selfSEXP, SEXP mat2SEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mat2(mat2SEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__int_mm_self_Tensor_mat2_Tensor(self, mat2));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__int_mm_out_out_Tensor_self_Tensor_mat2_Tensor
+XPtrTorchTensor cpp_torch_namespace__int_mm_out_out_Tensor_self_Tensor_mat2_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor mat2);
+RcppExport SEXP _torch_cpp_torch_namespace__int_mm_out_out_Tensor_self_Tensor_mat2_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP mat2SEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mat2(mat2SEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__int_mm_out_out_Tensor_self_Tensor_mat2_Tensor(out, self, mat2));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__convert_weight_to_int4pack_self_Tensor_innerKTiles_int64_t
+XPtrTorchTensor cpp_torch_namespace__convert_weight_to_int4pack_self_Tensor_innerKTiles_int64_t(XPtrTorchTensor self, XPtrTorchint64_t innerKTiles);
+RcppExport SEXP _torch_cpp_torch_namespace__convert_weight_to_int4pack_self_Tensor_innerKTiles_int64_t(SEXP selfSEXP, SEXP innerKTilesSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type innerKTiles(innerKTilesSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__convert_weight_to_int4pack_self_Tensor_innerKTiles_int64_t(self, innerKTiles));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__weight_int4pack_mm_self_Tensor_mat2_Tensor_qGroupSize_int64_t_qScaleAndZeros_Tensor
+XPtrTorchTensor cpp_torch_namespace__weight_int4pack_mm_self_Tensor_mat2_Tensor_qGroupSize_int64_t_qScaleAndZeros_Tensor(XPtrTorchTensor self, XPtrTorchTensor mat2, XPtrTorchint64_t qGroupSize, XPtrTorchTensor qScaleAndZeros);
+RcppExport SEXP _torch_cpp_torch_namespace__weight_int4pack_mm_self_Tensor_mat2_Tensor_qGroupSize_int64_t_qScaleAndZeros_Tensor(SEXP selfSEXP, SEXP mat2SEXP, SEXP qGroupSizeSEXP, SEXP qScaleAndZerosSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mat2(mat2SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type qGroupSize(qGroupSizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type qScaleAndZeros(qScaleAndZerosSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__weight_int4pack_mm_self_Tensor_mat2_Tensor_qGroupSize_int64_t_qScaleAndZeros_Tensor(self, mat2, qGroupSize, qScaleAndZeros));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__weight_int8pack_mm_self_Tensor_mat2_Tensor_scales_Tensor
+XPtrTorchTensor cpp_torch_namespace__weight_int8pack_mm_self_Tensor_mat2_Tensor_scales_Tensor(XPtrTorchTensor self, XPtrTorchTensor mat2, XPtrTorchTensor scales);
+RcppExport SEXP _torch_cpp_torch_namespace__weight_int8pack_mm_self_Tensor_mat2_Tensor_scales_Tensor(SEXP selfSEXP, SEXP mat2SEXP, SEXP scalesSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mat2(mat2SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type scales(scalesSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__weight_int8pack_mm_self_Tensor_mat2_Tensor_scales_Tensor(self, mat2, scales));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace__sparse_mm_sparse_Tensor_dense_Tensor
 XPtrTorchTensor cpp_torch_namespace__sparse_mm_sparse_Tensor_dense_Tensor(XPtrTorchTensor sparse, XPtrTorchTensor dense);
 RcppExport SEXP _torch_cpp_torch_namespace__sparse_mm_sparse_Tensor_dense_Tensor(SEXP sparseSEXP, SEXP denseSEXP) {
@@ -17841,6 +18623,22 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__native_batch_norm_legit_no_training_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double
+Rcpp::List cpp_torch_namespace__native_batch_norm_legit_no_training_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchTensor running_mean, XPtrTorchTensor running_var, XPtrTorchdouble momentum, XPtrTorchdouble eps);
+RcppExport SEXP _torch_cpp_torch_namespace__native_batch_norm_legit_no_training_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(SEXP inputSEXP, SEXP weightSEXP, SEXP biasSEXP, SEXP running_meanSEXP, SEXP running_varSEXP, SEXP momentumSEXP, SEXP epsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type weight(weightSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type running_mean(running_meanSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type running_var(running_varSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type momentum(momentumSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__native_batch_norm_legit_no_training_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(input, weight, bias, running_mean, running_var, momentum, eps));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace__native_batch_norm_legit_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double
 Rcpp::List cpp_torch_namespace__native_batch_norm_legit_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double(XPtrTorchTensor out, XPtrTorchTensor save_mean, XPtrTorchTensor save_invstd, XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchTensor running_mean, XPtrTorchTensor running_var, XPtrTorchbool training, XPtrTorchdouble momentum, XPtrTorchdouble eps);
 RcppExport SEXP _torch_cpp_torch_namespace__native_batch_norm_legit_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double(SEXP outSEXP, SEXP save_meanSEXP, SEXP save_invstdSEXP, SEXP inputSEXP, SEXP weightSEXP, SEXP biasSEXP, SEXP running_meanSEXP, SEXP running_varSEXP, SEXP trainingSEXP, SEXP momentumSEXP, SEXP epsSEXP) {
@@ -18006,9 +18804,9 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor
-XPtrTorchTensor cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor(XPtrTorchTensor grad_out, XPtrTorchTensor input, XPtrTorchTensor mean, XPtrTorchTensor invstd, XPtrTorchOptionalTensor weight, XPtrTorchTensor mean_dy, XPtrTorchTensor mean_dy_xmu, XPtrTorchTensor count);
-RcppExport SEXP _torch_cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor(SEXP grad_outSEXP, SEXP inputSEXP, SEXP meanSEXP, SEXP invstdSEXP, SEXP weightSEXP, SEXP mean_dySEXP, SEXP mean_dy_xmuSEXP, SEXP countSEXP) {
+// cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor
+XPtrTorchTensor cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor(XPtrTorchTensor grad_out, XPtrTorchTensor input, XPtrTorchTensor mean, XPtrTorchTensor invstd, XPtrTorchOptionalTensor weight, XPtrTorchTensor sum_dy, XPtrTorchTensor sum_dy_xmu, XPtrTorchTensor count);
+RcppExport SEXP _torch_cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor(SEXP grad_outSEXP, SEXP inputSEXP, SEXP meanSEXP, SEXP invstdSEXP, SEXP weightSEXP, SEXP sum_dySEXP, SEXP sum_dy_xmuSEXP, SEXP countSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type grad_out(grad_outSEXP);
@@ -18016,10 +18814,10 @@ BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type mean(meanSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type invstd(invstdSEXP);
     Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type weight(weightSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mean_dy(mean_dySEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mean_dy_xmu(mean_dy_xmuSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type sum_dy(sum_dySEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type sum_dy_xmu(sum_dy_xmuSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type count(countSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor(grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count));
     return rcpp_result_gen;
 END_RCPP
 }
@@ -19719,6 +20517,21 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_slice_inverse_self_Tensor_src_Tensor
+XPtrTorchTensor cpp_torch_namespace_slice_inverse_self_Tensor_src_Tensor(XPtrTorchTensor self, XPtrTorchTensor src, XPtrTorchindex_int64_t dim, XPtrTorchoptional_int64_t start, XPtrTorchoptional_int64_t end, XPtrTorchint64_t step);
+RcppExport SEXP _torch_cpp_torch_namespace_slice_inverse_self_Tensor_src_Tensor(SEXP selfSEXP, SEXP srcSEXP, SEXP dimSEXP, SEXP startSEXP, SEXP endSEXP, SEXP stepSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type src(srcSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchindex_int64_t >::type dim(dimSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type start(startSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type end(endSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type step(stepSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_slice_inverse_self_Tensor_src_Tensor(self, src, dim, start, end, step));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_slice_scatter_self_Tensor_src_Tensor
 XPtrTorchTensor cpp_torch_namespace_slice_scatter_self_Tensor_src_Tensor(XPtrTorchTensor self, XPtrTorchTensor src, XPtrTorchindex_int64_t dim, XPtrTorchoptional_int64_t start, XPtrTorchoptional_int64_t end, XPtrTorchint64_t step);
 RcppExport SEXP _torch_cpp_torch_namespace_slice_scatter_self_Tensor_src_Tensor(SEXP selfSEXP, SEXP srcSEXP, SEXP dimSEXP, SEXP startSEXP, SEXP endSEXP, SEXP stepSEXP) {
@@ -20073,6 +20886,31 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__chunk_cat_tensors_TensorList_dim_int64_t_num_chunks_int64_t
+XPtrTorchTensor cpp_torch_namespace__chunk_cat_tensors_TensorList_dim_int64_t_num_chunks_int64_t(XPtrTorchTensorList tensors, XPtrTorchindex_int64_t dim, XPtrTorchint64_t num_chunks);
+RcppExport SEXP _torch_cpp_torch_namespace__chunk_cat_tensors_TensorList_dim_int64_t_num_chunks_int64_t(SEXP tensorsSEXP, SEXP dimSEXP, SEXP num_chunksSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensors(tensorsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchindex_int64_t >::type dim(dimSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type num_chunks(num_chunksSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__chunk_cat_tensors_TensorList_dim_int64_t_num_chunks_int64_t(tensors, dim, num_chunks));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__chunk_cat_out_out_Tensor_tensors_TensorList_dim_int64_t_num_chunks_int64_t
+XPtrTorchTensor cpp_torch_namespace__chunk_cat_out_out_Tensor_tensors_TensorList_dim_int64_t_num_chunks_int64_t(XPtrTorchTensor out, XPtrTorchTensorList tensors, XPtrTorchindex_int64_t dim, XPtrTorchint64_t num_chunks);
+RcppExport SEXP _torch_cpp_torch_namespace__chunk_cat_out_out_Tensor_tensors_TensorList_dim_int64_t_num_chunks_int64_t(SEXP outSEXP, SEXP tensorsSEXP, SEXP dimSEXP, SEXP num_chunksSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensors(tensorsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchindex_int64_t >::type dim(dimSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type num_chunks(num_chunksSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__chunk_cat_out_out_Tensor_tensors_TensorList_dim_int64_t_num_chunks_int64_t(out, tensors, dim, num_chunks));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_stack_tensors_TensorList
 XPtrTorchTensor cpp_torch_namespace_stack_tensors_TensorList(XPtrTorchTensorList tensors, XPtrTorchindex_int64_t dim);
 RcppExport SEXP _torch_cpp_torch_namespace_stack_tensors_TensorList(SEXP tensorsSEXP, SEXP dimSEXP) {
@@ -20968,29 +21806,151 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef
-XPtrTorchTensor cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef(XPtrTorchTensor self, XPtrTorchTensor nested_size, XPtrTorchTensor nested_strides, XPtrTorchIntArrayRef offsets);
-RcppExport SEXP _torch_cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef(SEXP selfSEXP, SEXP nested_sizeSEXP, SEXP nested_stridesSEXP, SEXP offsetsSEXP) {
+// cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor
+XPtrTorchTensor cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor(XPtrTorchTensor self, XPtrTorchTensor nested_size, XPtrTorchTensor nested_strides, XPtrTorchTensor offsets);
+RcppExport SEXP _torch_cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor(SEXP selfSEXP, SEXP nested_sizeSEXP, SEXP nested_stridesSEXP, SEXP offsetsSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type nested_size(nested_sizeSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type nested_strides(nested_stridesSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type offsets(offsetsSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef(self, nested_size, nested_strides, offsets));
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type offsets(offsetsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor(self, nested_size, nested_strides, offsets));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef
-XPtrTorchTensor cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef(XPtrTorchTensor self, XPtrTorchTensor nested_size, XPtrTorchTensor nested_strides, XPtrTorchIntArrayRef offsets);
-RcppExport SEXP _torch_cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef(SEXP selfSEXP, SEXP nested_sizeSEXP, SEXP nested_stridesSEXP, SEXP offsetsSEXP) {
+// cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor
+XPtrTorchTensor cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor(XPtrTorchTensor self, XPtrTorchTensor nested_size, XPtrTorchTensor nested_strides, XPtrTorchTensor offsets);
+RcppExport SEXP _torch_cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor(SEXP selfSEXP, SEXP nested_sizeSEXP, SEXP nested_stridesSEXP, SEXP offsetsSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type nested_size(nested_sizeSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type nested_strides(nested_stridesSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type offsets(offsetsSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef(self, nested_size, nested_strides, offsets));
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type offsets(offsetsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor(self, nested_size, nested_strides, offsets));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__nested_view_from_jagged_self_Tensor_offsets_Tensor_dummy_Tensor
+XPtrTorchTensor cpp_torch_namespace__nested_view_from_jagged_self_Tensor_offsets_Tensor_dummy_Tensor(XPtrTorchTensor self, XPtrTorchTensor offsets, XPtrTorchTensor dummy, XPtrTorchOptionalTensor lengths, XPtrTorchint64_t ragged_idx, XPtrTorchOptionalTensor min_seqlen, XPtrTorchOptionalTensor max_seqlen);
+RcppExport SEXP _torch_cpp_torch_namespace__nested_view_from_jagged_self_Tensor_offsets_Tensor_dummy_Tensor(SEXP selfSEXP, SEXP offsetsSEXP, SEXP dummySEXP, SEXP lengthsSEXP, SEXP ragged_idxSEXP, SEXP min_seqlenSEXP, SEXP max_seqlenSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type offsets(offsetsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type dummy(dummySEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type lengths(lengthsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type ragged_idx(ragged_idxSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type min_seqlen(min_seqlenSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type max_seqlen(max_seqlenSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_view_from_jagged_self_Tensor_offsets_Tensor_dummy_Tensor(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__nested_view_from_jagged_copy_self_Tensor_offsets_Tensor_dummy_Tensor
+XPtrTorchTensor cpp_torch_namespace__nested_view_from_jagged_copy_self_Tensor_offsets_Tensor_dummy_Tensor(XPtrTorchTensor self, XPtrTorchTensor offsets, XPtrTorchTensor dummy, XPtrTorchOptionalTensor lengths, XPtrTorchint64_t ragged_idx, XPtrTorchOptionalTensor min_seqlen, XPtrTorchOptionalTensor max_seqlen);
+RcppExport SEXP _torch_cpp_torch_namespace__nested_view_from_jagged_copy_self_Tensor_offsets_Tensor_dummy_Tensor(SEXP selfSEXP, SEXP offsetsSEXP, SEXP dummySEXP, SEXP lengthsSEXP, SEXP ragged_idxSEXP, SEXP min_seqlenSEXP, SEXP max_seqlenSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type offsets(offsetsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type dummy(dummySEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type lengths(lengthsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type ragged_idx(ragged_idxSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type min_seqlen(min_seqlenSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type max_seqlen(max_seqlenSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_view_from_jagged_copy_self_Tensor_offsets_Tensor_dummy_Tensor(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__nested_get_values_self_Tensor
+XPtrTorchTensor cpp_torch_namespace__nested_get_values_self_Tensor(XPtrTorchTensor self);
+RcppExport SEXP _torch_cpp_torch_namespace__nested_get_values_self_Tensor(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_get_values_self_Tensor(self));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__nested_get_values_copy_self_Tensor
+XPtrTorchTensor cpp_torch_namespace__nested_get_values_copy_self_Tensor(XPtrTorchTensor self);
+RcppExport SEXP _torch_cpp_torch_namespace__nested_get_values_copy_self_Tensor(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_get_values_copy_self_Tensor(self));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__nested_get_offsets_self_Tensor
+XPtrTorchTensor cpp_torch_namespace__nested_get_offsets_self_Tensor(XPtrTorchTensor self);
+RcppExport SEXP _torch_cpp_torch_namespace__nested_get_offsets_self_Tensor(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_get_offsets_self_Tensor(self));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__nested_get_lengths_self_Tensor
+XPtrTorchTensor cpp_torch_namespace__nested_get_lengths_self_Tensor(XPtrTorchTensor self);
+RcppExport SEXP _torch_cpp_torch_namespace__nested_get_lengths_self_Tensor(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_get_lengths_self_Tensor(self));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__nested_get_ragged_idx_self_Tensor
+XPtrTorchint64_t cpp_torch_namespace__nested_get_ragged_idx_self_Tensor(XPtrTorchTensor self);
+RcppExport SEXP _torch_cpp_torch_namespace__nested_get_ragged_idx_self_Tensor(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_get_ragged_idx_self_Tensor(self));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__nested_get_min_seqlen_self_Tensor
+XPtrTorchTensor cpp_torch_namespace__nested_get_min_seqlen_self_Tensor(XPtrTorchTensor self);
+RcppExport SEXP _torch_cpp_torch_namespace__nested_get_min_seqlen_self_Tensor(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_get_min_seqlen_self_Tensor(self));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__nested_get_max_seqlen_self_Tensor
+XPtrTorchTensor cpp_torch_namespace__nested_get_max_seqlen_self_Tensor(XPtrTorchTensor self);
+RcppExport SEXP _torch_cpp_torch_namespace__nested_get_max_seqlen_self_Tensor(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_get_max_seqlen_self_Tensor(self));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__nested_get_jagged_dummy_any_Tensor
+XPtrTorchTensor cpp_torch_namespace__nested_get_jagged_dummy_any_Tensor(XPtrTorchTensor any);
+RcppExport SEXP _torch_cpp_torch_namespace__nested_get_jagged_dummy_any_Tensor(SEXP anySEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type any(anySEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_get_jagged_dummy_any_Tensor(any));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__nested_compute_contiguous_strides_offsets_nested_size_Tensor
+Rcpp::List cpp_torch_namespace__nested_compute_contiguous_strides_offsets_nested_size_Tensor(XPtrTorchTensor nested_size);
+RcppExport SEXP _torch_cpp_torch_namespace__nested_compute_contiguous_strides_offsets_nested_size_Tensor(SEXP nested_sizeSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type nested_size(nested_sizeSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_compute_contiguous_strides_offsets_nested_size_Tensor(nested_size));
     return rcpp_result_gen;
 END_RCPP
 }
@@ -21587,6 +22547,78 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__batch_norm_with_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double
+Rcpp::List cpp_torch_namespace__batch_norm_with_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchTensor running_mean, XPtrTorchTensor running_var, XPtrTorchdouble momentum, XPtrTorchdouble eps);
+RcppExport SEXP _torch_cpp_torch_namespace__batch_norm_with_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(SEXP inputSEXP, SEXP weightSEXP, SEXP biasSEXP, SEXP running_meanSEXP, SEXP running_varSEXP, SEXP momentumSEXP, SEXP epsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type weight(weightSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type running_mean(running_meanSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type running_var(running_varSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type momentum(momentumSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__batch_norm_with_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(input, weight, bias, running_mean, running_var, momentum, eps));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__batch_norm_with_update_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_reserve_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double
+Rcpp::List cpp_torch_namespace__batch_norm_with_update_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_reserve_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(XPtrTorchTensor out, XPtrTorchTensor save_mean, XPtrTorchTensor save_invstd, XPtrTorchTensor reserve, XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchTensor running_mean, XPtrTorchTensor running_var, XPtrTorchdouble momentum, XPtrTorchdouble eps);
+RcppExport SEXP _torch_cpp_torch_namespace__batch_norm_with_update_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_reserve_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(SEXP outSEXP, SEXP save_meanSEXP, SEXP save_invstdSEXP, SEXP reserveSEXP, SEXP inputSEXP, SEXP weightSEXP, SEXP biasSEXP, SEXP running_meanSEXP, SEXP running_varSEXP, SEXP momentumSEXP, SEXP epsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type save_mean(save_meanSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type save_invstd(save_invstdSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type reserve(reserveSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type weight(weightSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type running_mean(running_meanSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type running_var(running_varSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type momentum(momentumSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__batch_norm_with_update_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_reserve_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(out, save_mean, save_invstd, reserve, input, weight, bias, running_mean, running_var, momentum, eps));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__batch_norm_no_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double
+Rcpp::List cpp_torch_namespace__batch_norm_no_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchOptionalTensor running_mean, XPtrTorchOptionalTensor running_var, XPtrTorchdouble momentum, XPtrTorchdouble eps);
+RcppExport SEXP _torch_cpp_torch_namespace__batch_norm_no_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(SEXP inputSEXP, SEXP weightSEXP, SEXP biasSEXP, SEXP running_meanSEXP, SEXP running_varSEXP, SEXP momentumSEXP, SEXP epsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type weight(weightSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type running_mean(running_meanSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type running_var(running_varSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type momentum(momentumSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__batch_norm_no_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(input, weight, bias, running_mean, running_var, momentum, eps));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace_batch_norm_backward_grad_out_Tensor_input_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_var_Tensor_update_bool_eps_double_output_mask_stdarraybool3_reserve_Tensor
+Rcpp::List cpp_torch_namespace_batch_norm_backward_grad_out_Tensor_input_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_var_Tensor_update_bool_eps_double_output_mask_stdarraybool3_reserve_Tensor(XPtrTorchTensor grad_out, XPtrTorchTensor input, XPtrTorchTensor weight, XPtrTorchOptionalTensor running_mean, XPtrTorchOptionalTensor running_var, XPtrTorchOptionalTensor save_mean, XPtrTorchOptionalTensor save_var, XPtrTorchbool update, XPtrTorchdouble eps, std::vector<bool> output_mask, XPtrTorchTensor reserve);
+RcppExport SEXP _torch_cpp_torch_namespace_batch_norm_backward_grad_out_Tensor_input_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_var_Tensor_update_bool_eps_double_output_mask_stdarraybool3_reserve_Tensor(SEXP grad_outSEXP, SEXP inputSEXP, SEXP weightSEXP, SEXP running_meanSEXP, SEXP running_varSEXP, SEXP save_meanSEXP, SEXP save_varSEXP, SEXP updateSEXP, SEXP epsSEXP, SEXP output_maskSEXP, SEXP reserveSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type grad_out(grad_outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type weight(weightSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type running_mean(running_meanSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type running_var(running_varSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type save_mean(save_meanSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type save_var(save_varSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type update(updateSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP);
+    Rcpp::traits::input_parameter< std::vector<bool> >::type output_mask(output_maskSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type reserve(reserveSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_batch_norm_backward_grad_out_Tensor_input_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_var_Tensor_update_bool_eps_double_output_mask_stdarraybool3_reserve_Tensor(grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace__sparse_sum_self_Tensor
 XPtrTorchTensor cpp_torch_namespace__sparse_sum_self_Tensor(XPtrTorchTensor self);
 RcppExport SEXP _torch_cpp_torch_namespace__sparse_sum_self_Tensor(SEXP selfSEXP) {
@@ -22315,6 +23347,56 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__scaled_mm_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor
+XPtrTorchTensor cpp_torch_namespace__scaled_mm_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor(XPtrTorchTensor self, XPtrTorchTensor mat2, XPtrTorchTensor scale_a, XPtrTorchTensor scale_b, XPtrTorchOptionalTensor bias, XPtrTorchOptionalTensor scale_result, XPtrTorchoptional_scalar_type out_dtype, XPtrTorchbool use_fast_accum);
+RcppExport SEXP _torch_cpp_torch_namespace__scaled_mm_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor(SEXP selfSEXP, SEXP mat2SEXP, SEXP scale_aSEXP, SEXP scale_bSEXP, SEXP biasSEXP, SEXP scale_resultSEXP, SEXP out_dtypeSEXP, SEXP use_fast_accumSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mat2(mat2SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type scale_a(scale_aSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type scale_b(scale_bSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type scale_result(scale_resultSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_scalar_type >::type out_dtype(out_dtypeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type use_fast_accum(use_fast_accumSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_mm_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor(self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__scaled_mm_out_out_Tensor_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor
+XPtrTorchTensor cpp_torch_namespace__scaled_mm_out_out_Tensor_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor mat2, XPtrTorchTensor scale_a, XPtrTorchTensor scale_b, XPtrTorchOptionalTensor bias, XPtrTorchOptionalTensor scale_result, XPtrTorchoptional_scalar_type out_dtype, XPtrTorchbool use_fast_accum);
+RcppExport SEXP _torch_cpp_torch_namespace__scaled_mm_out_out_Tensor_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP mat2SEXP, SEXP scale_aSEXP, SEXP scale_bSEXP, SEXP biasSEXP, SEXP scale_resultSEXP, SEXP out_dtypeSEXP, SEXP use_fast_accumSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mat2(mat2SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type scale_a(scale_aSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type scale_b(scale_bSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type scale_result(scale_resultSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_scalar_type >::type out_dtype(out_dtypeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type use_fast_accum(use_fast_accumSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_mm_out_out_Tensor_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor(out, self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__sparse_compressed_tensor_with_dims_nnz_int64_t_dense_dim_int64_t_size_IntArrayRef_blocksize_IntArrayRef_index_dtype_ScalarType_options_TensorOptions
+XPtrTorchTensor cpp_torch_namespace__sparse_compressed_tensor_with_dims_nnz_int64_t_dense_dim_int64_t_size_IntArrayRef_blocksize_IntArrayRef_index_dtype_ScalarType_options_TensorOptions(XPtrTorchint64_t nnz, XPtrTorchint64_t dense_dim, XPtrTorchIntArrayRef size, XPtrTorchIntArrayRef blocksize, XPtrTorchDtype index_dtype, XPtrTorchTensorOptions options);
+RcppExport SEXP _torch_cpp_torch_namespace__sparse_compressed_tensor_with_dims_nnz_int64_t_dense_dim_int64_t_size_IntArrayRef_blocksize_IntArrayRef_index_dtype_ScalarType_options_TensorOptions(SEXP nnzSEXP, SEXP dense_dimSEXP, SEXP sizeSEXP, SEXP blocksizeSEXP, SEXP index_dtypeSEXP, SEXP optionsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type nnz(nnzSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type dense_dim(dense_dimSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type size(sizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type blocksize(blocksizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchDtype >::type index_dtype(index_dtypeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorOptions >::type options(optionsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__sparse_compressed_tensor_with_dims_nnz_int64_t_dense_dim_int64_t_size_IntArrayRef_blocksize_IntArrayRef_index_dtype_ScalarType_options_TensorOptions(nnz, dense_dim, size, blocksize, index_dtype, options));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_sparse_compressed_tensor_compressed_indices_Tensor_plain_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions
 XPtrTorchTensor cpp_torch_namespace_sparse_compressed_tensor_compressed_indices_Tensor_plain_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions(XPtrTorchTensor compressed_indices, XPtrTorchTensor plain_indices, XPtrTorchTensor values, XPtrTorchIntArrayRef size, XPtrTorchTensorOptions options);
 RcppExport SEXP _torch_cpp_torch_namespace_sparse_compressed_tensor_compressed_indices_Tensor_plain_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions(SEXP compressed_indicesSEXP, SEXP plain_indicesSEXP, SEXP valuesSEXP, SEXP sizeSEXP, SEXP optionsSEXP) {
@@ -22532,51 +23614,55 @@ BEGIN_RCPP
 END_RCPP
 }
 // cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions
-XPtrTorchTensor cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions(XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchTensorOptions options);
-RcppExport SEXP _torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions(SEXP indicesSEXP, SEXP valuesSEXP, SEXP optionsSEXP) {
+XPtrTorchTensor cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions(XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchTensorOptions options, XPtrTorchoptional_bool is_coalesced);
+RcppExport SEXP _torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions(SEXP indicesSEXP, SEXP valuesSEXP, SEXP optionsSEXP, SEXP is_coalescedSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchIndexTensor >::type indices(indicesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensorOptions >::type options(optionsSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions(indices, values, options));
+    Rcpp::traits::input_parameter< XPtrTorchoptional_bool >::type is_coalesced(is_coalescedSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions(indices, values, options, is_coalesced));
     return rcpp_result_gen;
 END_RCPP
 }
 // cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions
-XPtrTorchTensor cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions(XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchIntArrayRef size, XPtrTorchTensorOptions options);
-RcppExport SEXP _torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions(SEXP indicesSEXP, SEXP valuesSEXP, SEXP sizeSEXP, SEXP optionsSEXP) {
+XPtrTorchTensor cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions(XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchIntArrayRef size, XPtrTorchTensorOptions options, XPtrTorchoptional_bool is_coalesced);
+RcppExport SEXP _torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions(SEXP indicesSEXP, SEXP valuesSEXP, SEXP sizeSEXP, SEXP optionsSEXP, SEXP is_coalescedSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchIndexTensor >::type indices(indicesSEXP);
    Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type size(sizeSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensorOptions >::type options(optionsSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions(indices, values, size, options));
+    Rcpp::traits::input_parameter< XPtrTorchoptional_bool >::type is_coalesced(is_coalescedSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions(indices, values, size, options, is_coalesced));
     return rcpp_result_gen;
 END_RCPP
 }
 // cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef
-XPtrTorchTensor cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef(XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchIntArrayRef size, XPtrTorchTensorOptions options);
-RcppExport SEXP _torch_cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef(SEXP indicesSEXP, SEXP valuesSEXP, SEXP sizeSEXP, SEXP optionsSEXP) {
+XPtrTorchTensor cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef(XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchIntArrayRef size, XPtrTorchTensorOptions options, XPtrTorchoptional_bool is_coalesced);
+RcppExport SEXP _torch_cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef(SEXP indicesSEXP, SEXP valuesSEXP, SEXP sizeSEXP, SEXP optionsSEXP, SEXP is_coalescedSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchIndexTensor >::type indices(indicesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type size(sizeSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensorOptions >::type options(optionsSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef(indices, values, size, options));
+    Rcpp::traits::input_parameter< XPtrTorchoptional_bool >::type is_coalesced(is_coalescedSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef(indices, values, size, options, is_coalesced));
     return rcpp_result_gen;
 END_RCPP
 }
 // cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef
-void cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef(XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchIntArrayRef size);
-RcppExport SEXP _torch_cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef(SEXP indicesSEXP, SEXP valuesSEXP, SEXP sizeSEXP) {
+void cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef(XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchIntArrayRef size, XPtrTorchoptional_bool is_coalesced);
+RcppExport SEXP _torch_cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef(SEXP indicesSEXP, SEXP valuesSEXP, SEXP sizeSEXP, SEXP is_coalescedSEXP) {
 BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchIndexTensor >::type indices(indicesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type size(sizeSEXP);
-    cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef(indices, values, size);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_bool >::type is_coalesced(is_coalescedSEXP);
+    cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef(indices, values, size, is_coalesced);
     return R_NilValue;
 END_RCPP
 }
@@ -22655,8 +23741,8 @@ BEGIN_RCPP
 END_RCPP
 }
 // cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions
-XPtrTorchTensor cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions(XPtrTorchint64_t sparse_dim, XPtrTorchint64_t dense_dim, XPtrTorchIntArrayRef size, XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchTensorOptions options);
-RcppExport SEXP _torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions(SEXP sparse_dimSEXP, SEXP dense_dimSEXP, SEXP sizeSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP optionsSEXP) {
+XPtrTorchTensor cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions(XPtrTorchint64_t sparse_dim, XPtrTorchint64_t dense_dim, XPtrTorchIntArrayRef size, XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchTensorOptions options, XPtrTorchoptional_bool is_coalesced);
+RcppExport SEXP _torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions(SEXP sparse_dimSEXP, SEXP dense_dimSEXP, SEXP sizeSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP optionsSEXP, SEXP is_coalescedSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchint64_t >::type sparse_dim(sparse_dimSEXP);
@@ -22665,7 +23751,8 @@ BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchIndexTensor >::type indices(indicesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensorOptions >::type options(optionsSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions(sparse_dim, dense_dim, size, indices, values, options));
+    Rcpp::traits::input_parameter< XPtrTorchoptional_bool >::type is_coalesced(is_coalescedSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions(sparse_dim, dense_dim, size, indices, values, options, is_coalesced));
     return rcpp_result_gen;
 END_RCPP
 }
@@ -22680,13 +23767,14 @@ BEGIN_RCPP
 END_RCPP
 }
 // cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor
-XPtrTorchTensor cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor(XPtrTorchTensor grad, XPtrTorchTensor input);
-RcppExport SEXP _torch_cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor(SEXP gradSEXP, SEXP inputSEXP) {
+XPtrTorchTensor cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor(XPtrTorchTensor grad, XPtrTorchTensor input, XPtrTorchoptional_bool masked_grad);
+RcppExport SEXP _torch_cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor(SEXP gradSEXP, SEXP inputSEXP, SEXP masked_gradSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type grad(gradSEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor(grad, input));
+    Rcpp::traits::input_parameter< XPtrTorchoptional_bool >::type masked_grad(masked_gradSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor(grad, input, masked_grad));
     return rcpp_result_gen;
 END_RCPP
 }
@@ -22757,6 +23845,16 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__to_sparse_semi_structured_dense_Tensor
+Rcpp::List cpp_torch_namespace__to_sparse_semi_structured_dense_Tensor(XPtrTorchTensor dense);
+RcppExport SEXP _torch_cpp_torch_namespace__to_sparse_semi_structured_dense_Tensor(SEXP denseSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type dense(denseSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__to_sparse_semi_structured_dense_Tensor(dense));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_mkldnn_reorder_conv2d_weight_self_Tensor
 XPtrTorchTensor cpp_torch_namespace_mkldnn_reorder_conv2d_weight_self_Tensor(XPtrTorchTensor self, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups, XPtrTorchOptionalIntArrayRef input_size);
 RcppExport SEXP _torch_cpp_torch_namespace_mkldnn_reorder_conv2d_weight_self_Tensor(SEXP selfSEXP, SEXP paddingSEXP, SEXP strideSEXP, SEXP dilationSEXP, SEXP groupsSEXP, SEXP input_sizeSEXP) {
@@ -22773,8 +23871,8 @@ BEGIN_RCPP
 END_RCPP
 }
 // cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor
-XPtrTorchTensor cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor(XPtrTorchTensor self, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups);
-RcppExport SEXP _torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor(SEXP selfSEXP, SEXP paddingSEXP, SEXP strideSEXP, SEXP dilationSEXP, SEXP groupsSEXP) {
+XPtrTorchTensor cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor(XPtrTorchTensor self, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups, XPtrTorchOptionalIntArrayRef input_size);
+RcppExport SEXP _torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor(SEXP selfSEXP, SEXP paddingSEXP, SEXP strideSEXP, SEXP dilationSEXP, SEXP groupsSEXP, SEXP input_sizeSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
@@ -22782,7 +23880,8 @@ BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type stride(strideSEXP);
     Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type dilation(dilationSEXP);
     Rcpp::traits::input_parameter< XPtrTorchint64_t >::type groups(groupsSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor(self, padding, stride, dilation, groups));
+    Rcpp::traits::input_parameter< XPtrTorchOptionalIntArrayRef >::type input_size(input_sizeSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor(self, padding, stride, dilation, groups, input_size));
     return rcpp_result_gen;
 END_RCPP
 }
@@ -23319,14 +24418,14 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace_can_cast_from_ScalarType_to_ScalarType
-XPtrTorchbool cpp_torch_namespace_can_cast_from_ScalarType_to_ScalarType(XPtrTorchDtype from, XPtrTorchDtype to);
-RcppExport SEXP _torch_cpp_torch_namespace_can_cast_from_ScalarType_to_ScalarType(SEXP fromSEXP, SEXP toSEXP) {
+// cpp_torch_namespace_can_cast_from__ScalarType_to_ScalarType
+XPtrTorchbool cpp_torch_namespace_can_cast_from__ScalarType_to_ScalarType(XPtrTorchDtype from_, XPtrTorchDtype to);
+RcppExport SEXP _torch_cpp_torch_namespace_can_cast_from__ScalarType_to_ScalarType(SEXP from_SEXP, SEXP toSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
-    Rcpp::traits::input_parameter< XPtrTorchDtype >::type from(fromSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchDtype >::type from_(from_SEXP);
     Rcpp::traits::input_parameter< XPtrTorchDtype >::type to(toSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_can_cast_from_ScalarType_to_ScalarType(from, to));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_can_cast_from__ScalarType_to_ScalarType(from_, to));
    return rcpp_result_gen;
 END_RCPP
 }
@@ -23370,11 +24469,11 @@ BEGIN_RCPP
 END_RCPP
 }
 // cpp_torch_namespace_lstm_mps_backward_grad_y_Tensor_grad_hy_Tensor_grad_cy_Tensor_z_state_Tensor_cell_state_fwd_Tensor_input_Tensor_layersOutputs_Tensor_hx_TensorList_params_TensorList_has_biases_bool_num_layers_int64_t_dropout_double_train_bool_bidirectional_bool_batch_first_bool
-Rcpp::List cpp_torch_namespace_lstm_mps_backward_grad_y_Tensor_grad_hy_Tensor_grad_cy_Tensor_z_state_Tensor_cell_state_fwd_Tensor_input_Tensor_layersOutputs_Tensor_hx_TensorList_params_TensorList_has_biases_bool_num_layers_int64_t_dropout_double_train_bool_bidirectional_bool_batch_first_bool(XPtrTorchTensor grad_y, XPtrTorchOptionalTensor grad_hy, XPtrTorchOptionalTensor grad_cy, XPtrTorchTensor z_state, XPtrTorchTensor cell_state_fwd, XPtrTorchTensor input, XPtrTorchTensor layersOutputs, XPtrTorchTensorList hx, XPtrTorchTensorList params, XPtrTorchbool has_biases, XPtrTorchint64_t num_layers, XPtrTorchdouble dropout, XPtrTorchbool train, XPtrTorchbool bidirectional, XPtrTorchbool batch_first);
+Rcpp::List cpp_torch_namespace_lstm_mps_backward_grad_y_Tensor_grad_hy_Tensor_grad_cy_Tensor_z_state_Tensor_cell_state_fwd_Tensor_input_Tensor_layersOutputs_Tensor_hx_TensorList_params_TensorList_has_biases_bool_num_layers_int64_t_dropout_double_train_bool_bidirectional_bool_batch_first_bool(XPtrTorchOptionalTensor grad_y, XPtrTorchOptionalTensor grad_hy, XPtrTorchOptionalTensor grad_cy, XPtrTorchTensor z_state, XPtrTorchTensor cell_state_fwd, XPtrTorchTensor input, XPtrTorchTensor layersOutputs, XPtrTorchTensorList hx, XPtrTorchTensorList params, XPtrTorchbool has_biases, XPtrTorchint64_t num_layers, XPtrTorchdouble dropout, XPtrTorchbool train, XPtrTorchbool bidirectional, XPtrTorchbool batch_first);
 RcppExport SEXP _torch_cpp_torch_namespace_lstm_mps_backward_grad_y_Tensor_grad_hy_Tensor_grad_cy_Tensor_z_state_Tensor_cell_state_fwd_Tensor_input_Tensor_layersOutputs_Tensor_hx_TensorList_params_TensorList_has_biases_bool_num_layers_int64_t_dropout_double_train_bool_bidirectional_bool_batch_first_bool(SEXP grad_ySEXP, SEXP grad_hySEXP, SEXP grad_cySEXP, SEXP z_stateSEXP, SEXP cell_state_fwdSEXP, SEXP inputSEXP, SEXP layersOutputsSEXP, SEXP hxSEXP, SEXP paramsSEXP, SEXP has_biasesSEXP, SEXP num_layersSEXP, SEXP dropoutSEXP, SEXP trainSEXP, SEXP bidirectionalSEXP, SEXP batch_firstSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
-    Rcpp::traits::input_parameter< XPtrTorchTensor >::type grad_y(grad_ySEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_y(grad_ySEXP);
     Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_hy(grad_hySEXP);
     Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_cy(grad_cySEXP);
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type z_state(z_stateSEXP);
@@ -23896,6 +24995,18 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_masked_scatter_backward_grad_output_Tensor_mask_Tensor_sizes_IntArrayRef
+XPtrTorchTensor cpp_torch_namespace_masked_scatter_backward_grad_output_Tensor_mask_Tensor_sizes_IntArrayRef(XPtrTorchTensor grad_output, XPtrTorchTensor mask, XPtrTorchIntArrayRef sizes);
+RcppExport SEXP _torch_cpp_torch_namespace_masked_scatter_backward_grad_output_Tensor_mask_Tensor_sizes_IntArrayRef(SEXP grad_outputSEXP, SEXP maskSEXP, SEXP sizesSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type grad_output(grad_outputSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type mask(maskSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type sizes(sizesSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_masked_scatter_backward_grad_output_Tensor_mask_Tensor_sizes_IntArrayRef(grad_output, mask, sizes));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace__masked_softmax_self_Tensor_mask_Tensor
 XPtrTorchTensor cpp_torch_namespace__masked_softmax_self_Tensor_mask_Tensor(XPtrTorchTensor self, XPtrTorchTensor mask, XPtrTorchoptional_index_int64_t dim, XPtrTorchoptional_int64_t mask_type);
 RcppExport SEXP _torch_cpp_torch_namespace__masked_softmax_self_Tensor_mask_Tensor(SEXP selfSEXP, SEXP maskSEXP, SEXP dimSEXP, SEXP mask_typeSEXP) {
@@ -25508,6 +26619,31 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_nonzero_static_out_out_Tensor_self_Tensor_size_int64_t
+XPtrTorchTensor cpp_torch_namespace_nonzero_static_out_out_Tensor_self_Tensor_size_int64_t(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchint64_t size, XPtrTorchint64_t fill_value);
+RcppExport SEXP _torch_cpp_torch_namespace_nonzero_static_out_out_Tensor_self_Tensor_size_int64_t(SEXP outSEXP, SEXP selfSEXP, SEXP sizeSEXP, SEXP fill_valueSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type size(sizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type fill_value(fill_valueSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_nonzero_static_out_out_Tensor_self_Tensor_size_int64_t(out, self, size, fill_value));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace_nonzero_static_self_Tensor_size_int64_t
+XPtrTorchTensor cpp_torch_namespace_nonzero_static_self_Tensor_size_int64_t(XPtrTorchTensor self, XPtrTorchint64_t size, XPtrTorchint64_t fill_value);
+RcppExport SEXP _torch_cpp_torch_namespace_nonzero_static_self_Tensor_size_int64_t(SEXP selfSEXP, SEXP sizeSEXP, SEXP fill_valueSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type size(sizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type fill_value(fill_valueSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_nonzero_static_self_Tensor_size_int64_t(self, size, fill_value));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_nonzero_numpy_self_Tensor
 XPtrTorchTensorList cpp_torch_namespace_nonzero_numpy_self_Tensor(XPtrTorchTensor self);
 RcppExport SEXP _torch_cpp_torch_namespace_nonzero_numpy_self_Tensor(SEXP selfSEXP) {
@@ -26713,6 +27849,17 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_min_out_out_Tensor_self_Tensor
+XPtrTorchTensor cpp_torch_namespace_min_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self);
+RcppExport SEXP _torch_cpp_torch_namespace_min_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_min_out_out_Tensor_self_Tensor(out, self));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_fmin_self_Tensor_other_Tensor
 XPtrTorchTensor cpp_torch_namespace_fmin_self_Tensor_other_Tensor(XPtrTorchTensor self, XPtrTorchTensor other);
 RcppExport SEXP _torch_cpp_torch_namespace_fmin_self_Tensor_other_Tensor(SEXP selfSEXP, SEXP otherSEXP) {
@@ -27142,6 +28289,20 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool
+XPtrTorchTensor cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchbool stable, XPtrTorchindex_int64_t dim, XPtrTorchbool descending);
+RcppExport SEXP _torch_cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool(SEXP outSEXP, SEXP selfSEXP, SEXP stableSEXP, SEXP dimSEXP, SEXP descendingSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type stable(stableSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchindex_int64_t >::type dim(dimSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type descending(descendingSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool(out, self, stable, dim, descending));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_argsort_self_Tensor_dim_Dimname
 XPtrTorchTensor cpp_torch_namespace_argsort_self_Tensor_dim_Dimname(XPtrTorchTensor self, XPtrTorchDimname dim, XPtrTorchbool descending);
 RcppExport SEXP _torch_cpp_torch_namespace_argsort_self_Tensor_dim_Dimname(SEXP selfSEXP, SEXP dimSEXP, SEXP descendingSEXP) {
@@ -27539,153 +28700,6 @@ BEGIN_RCPP
     return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar
-XPtrTorchTensorList cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) {
-BEGIN_RCPP
-    Rcpp::RObject rcpp_result_gen;
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar(self, scalar));
-    return rcpp_result_gen;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar
-void cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) {
-BEGIN_RCPP
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP);
-    cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar(self, scalar);
-    return R_NilValue;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar
-XPtrTorchTensorList cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) {
-BEGIN_RCPP
-    Rcpp::RObject rcpp_result_gen;
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar(self, scalar));
-    return rcpp_result_gen;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar
-void cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) {
-BEGIN_RCPP
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP);
-    cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar(self, scalar);
-    return R_NilValue;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar
-XPtrTorchTensorList cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) {
-BEGIN_RCPP
-    Rcpp::RObject rcpp_result_gen;
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar(self, scalar));
-    return rcpp_result_gen;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar
-void cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) {
-BEGIN_RCPP
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP);
-    cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar(self, scalar);
-    return R_NilValue;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar
-XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) {
-BEGIN_RCPP
-    Rcpp::RObject rcpp_result_gen;
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar(self, scalar));
-    return rcpp_result_gen;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar
-void cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) {
-BEGIN_RCPP
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP);
-    cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar(self, scalar);
-    return R_NilValue;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar
-XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) {
-BEGIN_RCPP
-    Rcpp::RObject rcpp_result_gen;
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar(self, scalar));
-    return rcpp_result_gen;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar
-void cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) {
-BEGIN_RCPP
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP);
-    cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar(self, scalar);
-    return R_NilValue;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar
-XPtrTorchTensorList cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) {
-BEGIN_RCPP
-    Rcpp::RObject rcpp_result_gen;
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar(self, scalar));
-    return rcpp_result_gen;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar
-void cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) {
-BEGIN_RCPP
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP);
-    cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar(self, scalar);
-    return R_NilValue;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar
-XPtrTorchTensorList cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) {
-BEGIN_RCPP
-    Rcpp::RObject rcpp_result_gen;
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar(self, scalar));
-    return rcpp_result_gen;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar
-void cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) {
-BEGIN_RCPP
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP);
-    cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar(self, scalar);
-    return R_NilValue;
-END_RCPP
-}
 // cpp_torch_namespace__foreach_add_self_TensorList_other_TensorList
 XPtrTorchTensorList cpp_torch_namespace__foreach_add_self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha);
 RcppExport SEXP _torch_cpp_torch_namespace__foreach_add_self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP,
SEXP alphaSEXP) { @@ -27709,6 +28723,71 @@ BEGIN_RCPP return R_NilValue; END_RCPP } +// cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar +XPtrTorchTensorList cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar(self, scalars)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar +void cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar(self, scalars); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__foreach_add_self_TensorList_other_Tensor +XPtrTorchTensorList cpp_torch_namespace__foreach_add_self_TensorList_other_Tensor(XPtrTorchTensorList self, XPtrTorchTensor other, XPtrTorchScalar alpha); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_add_self_TensorList_other_Tensor(SEXP selfSEXP, SEXP otherSEXP, SEXP alphaSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type other(otherSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type alpha(alphaSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_add_self_TensorList_other_Tensor(self, other, alpha)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__foreach_add__self_TensorList_other_Tensor +void cpp_torch_namespace__foreach_add__self_TensorList_other_Tensor(XPtrTorchTensorList self, XPtrTorchTensor other, XPtrTorchScalar alpha); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_add__self_TensorList_other_Tensor(SEXP selfSEXP, SEXP otherSEXP, SEXP alphaSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type other(otherSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type alpha(alphaSEXP); + cpp_torch_namespace__foreach_add__self_TensorList_other_Tensor(self, other, alpha); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar +XPtrTorchTensorList cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + rcpp_result_gen = 
Rcpp::wrap(cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar(self, scalar)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar +void cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar(self, scalar); + return R_NilValue; +END_RCPP +} // cpp_torch_namespace__foreach_sub_self_TensorList_other_TensorList XPtrTorchTensorList cpp_torch_namespace__foreach_sub_self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha); RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub_self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP, SEXP alphaSEXP) { @@ -27732,6 +28811,48 @@ BEGIN_RCPP return R_NilValue; END_RCPP } +// cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar +XPtrTorchTensorList cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar(self, scalars)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar +void cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar(self, scalars); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar +XPtrTorchTensorList cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar(self, scalar)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar +void cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) { +BEGIN_RCPP + 
Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar(self, scalar); + return R_NilValue; +END_RCPP +} // cpp_torch_namespace__foreach_mul_self_TensorList_other_TensorList XPtrTorchTensorList cpp_torch_namespace__foreach_mul_self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul_self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { @@ -27753,6 +28874,69 @@ BEGIN_RCPP return R_NilValue; END_RCPP } +// cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar +XPtrTorchTensorList cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar(self, scalars)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar +void cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar(self, scalars); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__foreach_mul_self_TensorList_other_Tensor +XPtrTorchTensorList cpp_torch_namespace__foreach_mul_self_TensorList_other_Tensor(XPtrTorchTensorList self, XPtrTorchTensor other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul_self_TensorList_other_Tensor(SEXP selfSEXP, SEXP otherSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type other(otherSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_mul_self_TensorList_other_Tensor(self, other)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__foreach_mul__self_TensorList_other_Tensor +void cpp_torch_namespace__foreach_mul__self_TensorList_other_Tensor(XPtrTorchTensorList self, XPtrTorchTensor other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul__self_TensorList_other_Tensor(SEXP selfSEXP, SEXP otherSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type other(otherSEXP); + cpp_torch_namespace__foreach_mul__self_TensorList_other_Tensor(self, other); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar +XPtrTorchTensorList cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP 
_torch_cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar(self, scalar)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar +void cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar(self, scalar); + return R_NilValue; +END_RCPP +} // cpp_torch_namespace__foreach_div_self_TensorList_other_TensorList XPtrTorchTensorList cpp_torch_namespace__foreach_div_self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); RcppExport SEXP _torch_cpp_torch_namespace__foreach_div_self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { @@ -27774,213 +28958,213 @@ BEGIN_RCPP return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList -XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { +// cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar +XPtrTorchTensorList cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList(self, other)); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar(self, scalars)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList -void cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { +// cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar +void cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type 
other(otherSEXP); - cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList(self, other); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar(self, scalars); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList -XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { +// cpp_torch_namespace__foreach_div_self_TensorList_other_Tensor +XPtrTorchTensorList cpp_torch_namespace__foreach_div_self_TensorList_other_Tensor(XPtrTorchTensorList self, XPtrTorchTensor other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_div_self_TensorList_other_Tensor(SEXP selfSEXP, SEXP otherSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList(self, other)); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type other(otherSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_div_self_TensorList_other_Tensor(self, other)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList -void cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { +// cpp_torch_namespace__foreach_div__self_TensorList_other_Tensor +void cpp_torch_namespace__foreach_div__self_TensorList_other_Tensor(XPtrTorchTensorList self, XPtrTorchTensor other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_div__self_TensorList_other_Tensor(SEXP selfSEXP, SEXP otherSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); - cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList(self, other); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type other(otherSEXP); + cpp_torch_namespace__foreach_div__self_TensorList_other_Tensor(self, other); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList -XPtrTorchTensorList cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { +// cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar +XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); - rcpp_result_gen = 
Rcpp::wrap(cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList(self, other)); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar(self, scalar)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList -void cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { +// cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar +void cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); - cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList(self, other); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar(self, scalar); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList -XPtrTorchTensorList cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { +// cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList +XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList(self, other)); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList(self, other)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList -void cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { +// cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList +void cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); - cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList(self, other); + 
cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList(self, other); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar -XPtrTorchTensorList cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar +XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar(self, scalars)); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar(self, scalars)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar -void cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar +void cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar(self, scalars); + cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar(self, scalars); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar -XPtrTorchTensorList cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar +XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar(self, scalars)); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + rcpp_result_gen = 
Rcpp::wrap(cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar(self, scalar)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar -void cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar +void cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar(self, scalars); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar(self, scalar); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar -XPtrTorchTensorList cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList +XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar(self, scalars)); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList(self, other)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar -void cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList +void cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar(self, scalars); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type 
other(otherSEXP); + cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList(self, other); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar -XPtrTorchTensorList cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar +XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar(self, scalars)); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar(self, scalars)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar -void cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar +void cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar(self, scalars); + cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar(self, scalars); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar -XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar +XPtrTorchTensorList cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar(self, scalars)); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type 
scalar(scalarSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar(self, scalar)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar -void cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar +void cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar(self, scalars); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar(self, scalar); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar -XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList +XPtrTorchTensorList cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar(self, scalars)); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList(self, other)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar -void cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList +void cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - 
cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar(self, scalars); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); + cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList(self, other); return R_NilValue; END_RCPP } @@ -28005,6 +29189,48 @@ BEGIN_RCPP return R_NilValue; END_RCPP } +// cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar +XPtrTorchTensorList cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar(self, scalar)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar +void cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar(XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar(SEXP selfSEXP, SEXP scalarSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar(self, scalar); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList +XPtrTorchTensorList cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList(self, other)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList +void cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList(SEXP selfSEXP, SEXP otherSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); + cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList(self, other); + return R_NilValue; +END_RCPP +} // cpp_torch_namespace__foreach_minimum_self_TensorList_scalars_ArrayRefScalar XPtrTorchTensorList cpp_torch_namespace__foreach_minimum_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); RcppExport SEXP _torch_cpp_torch_namespace__foreach_minimum_self_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP scalarsSEXP) { @@ -28026,50 +29252,153 @@ BEGIN_RCPP return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_exp_self_TensorList -XPtrTorchTensorList 
cpp_torch_namespace__foreach_exp_self_TensorList(XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_exp_self_TensorList(SEXP selfSEXP) { +// cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList +XPtrTorchTensorList cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP valueSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_exp_self_TensorList(self)); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type value(valueSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList(self, tensor1, tensor2, value)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__foreach_zero__self_TensorList -void cpp_torch_namespace__foreach_zero__self_TensorList(XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_zero__self_TensorList(SEXP selfSEXP) { +// cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar +XPtrTorchTensorList cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) { BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_zero__self_TensorList(self); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(self, tensor1, tensor2, scalars)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor +XPtrTorchTensorList cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); + Rcpp::traits::input_parameter< 
XPtrTorchTensorList >::type tensor2(tensor2SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type scalars(scalarsSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(self, tensor1, tensor2, scalars)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList +void cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP valueSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type value(valueSEXP); + cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList(self, tensor1, tensor2, value); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_exp__self_TensorList -void cpp_torch_namespace__foreach_exp__self_TensorList(XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_exp__self_TensorList(SEXP selfSEXP) { +// cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar +void cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_exp__self_TensorList(self); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(self, tensor1, tensor2, scalars); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_sqrt_self_TensorList -XPtrTorchTensorList cpp_torch_namespace__foreach_sqrt_self_TensorList(XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_sqrt_self_TensorList(SEXP selfSEXP) { +// cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor +void cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); 
+ Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type scalars(scalarsSEXP); + cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(self, tensor1, tensor2, scalars); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList +XPtrTorchTensorList cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP valueSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_sqrt_self_TensorList(self)); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type value(valueSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList(self, tensor1, tensor2, value)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__foreach_sqrt__self_TensorList -void cpp_torch_namespace__foreach_sqrt__self_TensorList(XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_sqrt__self_TensorList(SEXP selfSEXP) { +// cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar +XPtrTorchTensorList cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) { BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_sqrt__self_TensorList(self); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(self, tensor1, tensor2, scalars)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor +XPtrTorchTensorList cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP 
scalarsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type scalars(scalarsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(self, tensor1, tensor2, scalars));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList
+void cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP valueSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type value(valueSEXP);
+    cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList(self, tensor1, tensor2, value);
+    return R_NilValue;
+END_RCPP
+}
+// cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar
+void cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP);
+    cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(self, tensor1, tensor2, scalars);
+    return R_NilValue;
+END_RCPP
+}
+// cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor
+void cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type scalars(scalarsSEXP);
+    cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(self, tensor1, tensor2, scalars);
     return R_NilValue;
 END_RCPP
 }
@@ -28244,6 +29573,25 @@ BEGIN_RCPP
     return R_NilValue;
 END_RCPP
 }
+// cpp_torch_namespace__foreach_exp_self_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_exp_self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_exp_self_TensorList(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_exp_self_TensorList(self));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__foreach_exp__self_TensorList
+void cpp_torch_namespace__foreach_exp__self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_exp__self_TensorList(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    cpp_torch_namespace__foreach_exp__self_TensorList(self);
+    return R_NilValue;
+END_RCPP
+}
 // cpp_torch_namespace__foreach_expm1_self_TensorList
 XPtrTorchTensorList cpp_torch_namespace__foreach_expm1_self_TensorList(XPtrTorchTensorList self);
 RcppExport SEXP _torch_cpp_torch_namespace__foreach_expm1_self_TensorList(SEXP selfSEXP) {
@@ -28282,6 +29630,90 @@ BEGIN_RCPP
     return R_NilValue;
 END_RCPP
 }
+// cpp_torch_namespace__foreach_frac_self_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_frac_self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_frac_self_TensorList(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_frac_self_TensorList(self));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__foreach_frac__self_TensorList
+void cpp_torch_namespace__foreach_frac__self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_frac__self_TensorList(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    cpp_torch_namespace__foreach_frac__self_TensorList(self);
+    return R_NilValue;
+END_RCPP
+}
+// cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchTensorList weights);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList(SEXP selfSEXP, SEXP tensors1SEXP, SEXP weightsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensors1(tensors1SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type weights(weightsSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList(self, tensors1, weights));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList
+void cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchTensorList weights);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList(SEXP selfSEXP, SEXP tensors1SEXP, SEXP weightsSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensors1(tensors1SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type weights(weightsSEXP);
+    cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList(self, tensors1, weights);
+    return R_NilValue;
+END_RCPP
+}
+// cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar
+XPtrTorchTensorList cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar(XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchScalar weight);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar(SEXP selfSEXP, SEXP tensors1SEXP, SEXP weightSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensors1(tensors1SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type weight(weightSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar(self, tensors1, weight));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar
+void cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar(XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchScalar weight);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar(SEXP selfSEXP, SEXP tensors1SEXP, SEXP weightSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensors1(tensors1SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type weight(weightSEXP);
+    cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar(self, tensors1, weight);
+    return R_NilValue;
+END_RCPP
+}
+// cpp_torch_namespace__foreach_lgamma_self_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_lgamma_self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_lgamma_self_TensorList(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_lgamma_self_TensorList(self));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__foreach_lgamma__self_TensorList
+void cpp_torch_namespace__foreach_lgamma__self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_lgamma__self_TensorList(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    cpp_torch_namespace__foreach_lgamma__self_TensorList(self);
+    return R_NilValue;
+END_RCPP
+}
 // cpp_torch_namespace__foreach_log_self_TensorList
 XPtrTorchTensorList cpp_torch_namespace__foreach_log_self_TensorList(XPtrTorchTensorList self);
 RcppExport SEXP _torch_cpp_torch_namespace__foreach_log_self_TensorList(SEXP selfSEXP) {
@@ -28358,6 +29790,16 @@ BEGIN_RCPP
     return R_NilValue;
 END_RCPP
 }
+// cpp_torch_namespace__foreach_max_self_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_max_self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_max_self_TensorList(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_max_self_TensorList(self));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace__foreach_neg_self_TensorList
 XPtrTorchTensorList cpp_torch_namespace__foreach_neg_self_TensorList(XPtrTorchTensorList self);
 RcppExport SEXP _torch_cpp_torch_namespace__foreach_neg_self_TensorList(SEXP selfSEXP) {
@@ -28377,155 +29819,127 @@ BEGIN_RCPP
     return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_tan_self_TensorList
-XPtrTorchTensorList cpp_torch_namespace__foreach_tan_self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_tan_self_TensorList(SEXP selfSEXP) {
+// cpp_torch_namespace__foreach_norm_self_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_norm_self_TensorList(XPtrTorchTensorList self, XPtrTorchScalar ord, XPtrTorchoptional_scalar_type dtype);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_norm_self_TensorList(SEXP selfSEXP, SEXP ordSEXP, SEXP dtypeSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_tan_self_TensorList(self));
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type ord(ordSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_scalar_type >::type dtype(dtypeSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_norm_self_TensorList(self, ord, dtype));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_tan__self_TensorList
-void cpp_torch_namespace__foreach_tan__self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_tan__self_TensorList(SEXP selfSEXP) {
-BEGIN_RCPP
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    cpp_torch_namespace__foreach_tan__self_TensorList(self);
-    return R_NilValue;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_tanh_self_TensorList
-XPtrTorchTensorList cpp_torch_namespace__foreach_tanh_self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_tanh_self_TensorList(SEXP selfSEXP) {
+// cpp_torch_namespace__foreach_pow_self_TensorList_exponent_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_pow_self_TensorList_exponent_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList exponent);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_pow_self_TensorList_exponent_TensorList(SEXP selfSEXP, SEXP exponentSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_tanh_self_TensorList(self));
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exponent(exponentSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_pow_self_TensorList_exponent_TensorList(self, exponent));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_tanh__self_TensorList
-void cpp_torch_namespace__foreach_tanh__self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_tanh__self_TensorList(SEXP selfSEXP) {
-BEGIN_RCPP
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    cpp_torch_namespace__foreach_tanh__self_TensorList(self);
-    return R_NilValue;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_sin_self_TensorList
-XPtrTorchTensorList cpp_torch_namespace__foreach_sin_self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_sin_self_TensorList(SEXP selfSEXP) {
+// cpp_torch_namespace__foreach_pow_self_TensorList_exponent_Scalar
+XPtrTorchTensorList cpp_torch_namespace__foreach_pow_self_TensorList_exponent_Scalar(XPtrTorchTensorList self, XPtrTorchScalar exponent);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_pow_self_TensorList_exponent_Scalar(SEXP selfSEXP, SEXP exponentSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_sin_self_TensorList(self));
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type exponent(exponentSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_pow_self_TensorList_exponent_Scalar(self, exponent));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_sin__self_TensorList
-void cpp_torch_namespace__foreach_sin__self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_sin__self_TensorList(SEXP selfSEXP) {
-BEGIN_RCPP
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    cpp_torch_namespace__foreach_sin__self_TensorList(self);
-    return R_NilValue;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_sinh_self_TensorList
-XPtrTorchTensorList cpp_torch_namespace__foreach_sinh_self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_sinh_self_TensorList(SEXP selfSEXP) {
+// cpp_torch_namespace__foreach_pow_self_TensorList_exponent_ArrayRefScalar
+XPtrTorchTensorList cpp_torch_namespace__foreach_pow_self_TensorList_exponent_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar exponent);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_pow_self_TensorList_exponent_ArrayRefScalar(SEXP selfSEXP, SEXP exponentSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_sinh_self_TensorList(self));
+    Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type exponent(exponentSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_pow_self_TensorList_exponent_ArrayRefScalar(self, exponent));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_sinh__self_TensorList
-void cpp_torch_namespace__foreach_sinh__self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_sinh__self_TensorList(SEXP selfSEXP) {
-BEGIN_RCPP
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    cpp_torch_namespace__foreach_sinh__self_TensorList(self);
-    return R_NilValue;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_round_self_TensorList
-XPtrTorchTensorList cpp_torch_namespace__foreach_round_self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_round_self_TensorList(SEXP selfSEXP) {
+// cpp_torch_namespace__foreach_pow_self_Scalar_exponent_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_pow_self_Scalar_exponent_TensorList(XPtrTorchScalar self, XPtrTorchTensorList exponent);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_pow_self_Scalar_exponent_TensorList(SEXP selfSEXP, SEXP exponentSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_round_self_TensorList(self));
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exponent(exponentSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_pow_self_Scalar_exponent_TensorList(self, exponent));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_round__self_TensorList
-void cpp_torch_namespace__foreach_round__self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_round__self_TensorList(SEXP selfSEXP) {
+// cpp_torch_namespace__foreach_pow__self_TensorList_exponent_TensorList
+void cpp_torch_namespace__foreach_pow__self_TensorList_exponent_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList exponent);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_pow__self_TensorList_exponent_TensorList(SEXP selfSEXP, SEXP exponentSEXP) {
 BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    cpp_torch_namespace__foreach_round__self_TensorList(self);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exponent(exponentSEXP);
+    cpp_torch_namespace__foreach_pow__self_TensorList_exponent_TensorList(self, exponent);
     return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_lgamma_self_TensorList
-XPtrTorchTensorList cpp_torch_namespace__foreach_lgamma_self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_lgamma_self_TensorList(SEXP selfSEXP) {
+// cpp_torch_namespace__foreach_pow__self_TensorList_exponent_Scalar
+void cpp_torch_namespace__foreach_pow__self_TensorList_exponent_Scalar(XPtrTorchTensorList self, XPtrTorchScalar exponent);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_pow__self_TensorList_exponent_Scalar(SEXP selfSEXP, SEXP exponentSEXP) {
 BEGIN_RCPP
-    Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_lgamma_self_TensorList(self));
-    return rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type exponent(exponentSEXP);
+    cpp_torch_namespace__foreach_pow__self_TensorList_exponent_Scalar(self, exponent);
+    return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_lgamma__self_TensorList
-void cpp_torch_namespace__foreach_lgamma__self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_lgamma__self_TensorList(SEXP selfSEXP) {
+// cpp_torch_namespace__foreach_pow__self_TensorList_exponent_ArrayRefScalar
+void cpp_torch_namespace__foreach_pow__self_TensorList_exponent_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchvector_Scalar exponent);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_pow__self_TensorList_exponent_ArrayRefScalar(SEXP selfSEXP, SEXP exponentSEXP) {
 BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    cpp_torch_namespace__foreach_lgamma__self_TensorList(self);
+    Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type exponent(exponentSEXP);
+    cpp_torch_namespace__foreach_pow__self_TensorList_exponent_ArrayRefScalar(self, exponent);
     return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_frac_self_TensorList
-XPtrTorchTensorList cpp_torch_namespace__foreach_frac_self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_frac_self_TensorList(SEXP selfSEXP) {
+// cpp_torch_namespace__foreach_reciprocal_self_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_reciprocal_self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_reciprocal_self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_frac_self_TensorList(self));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_reciprocal_self_TensorList(self));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_frac__self_TensorList
-void cpp_torch_namespace__foreach_frac__self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_frac__self_TensorList(SEXP selfSEXP) {
+// cpp_torch_namespace__foreach_reciprocal__self_TensorList
+void cpp_torch_namespace__foreach_reciprocal__self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_reciprocal__self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    cpp_torch_namespace__foreach_frac__self_TensorList(self);
+    cpp_torch_namespace__foreach_reciprocal__self_TensorList(self);
     return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_reciprocal_self_TensorList
-XPtrTorchTensorList cpp_torch_namespace__foreach_reciprocal_self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_reciprocal_self_TensorList(SEXP selfSEXP) {
+// cpp_torch_namespace__foreach_round_self_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_round_self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_round_self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_reciprocal_self_TensorList(self));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_round_self_TensorList(self));
    return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_reciprocal__self_TensorList
-void cpp_torch_namespace__foreach_reciprocal__self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_reciprocal__self_TensorList(SEXP selfSEXP) {
+// cpp_torch_namespace__foreach_round__self_TensorList
+void cpp_torch_namespace__foreach_round__self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_round__self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    cpp_torch_namespace__foreach_reciprocal__self_TensorList(self);
+    cpp_torch_namespace__foreach_round__self_TensorList(self);
     return R_NilValue;
 END_RCPP
 }
@@ -28548,232 +29962,171 @@ BEGIN_RCPP
     return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_trunc_self_TensorList
-XPtrTorchTensorList cpp_torch_namespace__foreach_trunc_self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_trunc_self_TensorList(SEXP selfSEXP) {
+// cpp_torch_namespace__foreach_sign_self_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_sign_self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_sign_self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_trunc_self_TensorList(self));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_sign_self_TensorList(self));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_trunc__self_TensorList
-void cpp_torch_namespace__foreach_trunc__self_TensorList(XPtrTorchTensorList self);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_trunc__self_TensorList(SEXP selfSEXP) {
+// cpp_torch_namespace__foreach_sign__self_TensorList
+void cpp_torch_namespace__foreach_sign__self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_sign__self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    cpp_torch_namespace__foreach_trunc__self_TensorList(self);
+    cpp_torch_namespace__foreach_sign__self_TensorList(self);
     return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList
-void cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP valueSEXP) {
+// cpp_torch_namespace__foreach_sin_self_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_sin_self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_sin_self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type value(valueSEXP);
-    cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList(self, tensor1, tensor2, value);
-    return R_NilValue;
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_sin_self_TensorList(self));
+    return rcpp_result_gen;
END_RCPP
 }
-// cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList
-void cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP valueSEXP) {
+// cpp_torch_namespace__foreach_sin__self_TensorList
+void cpp_torch_namespace__foreach_sin__self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_sin__self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type value(valueSEXP);
-    cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList(self, tensor1, tensor2, value);
+    cpp_torch_namespace__foreach_sin__self_TensorList(self);
     return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar
-void cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) {
+// cpp_torch_namespace__foreach_sinh_self_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_sinh_self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_sinh_self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP);
-    cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(self, tensor1, tensor2, scalars);
-    return R_NilValue;
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_sinh_self_TensorList(self));
+    return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor
-void cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) {
+// cpp_torch_namespace__foreach_sinh__self_TensorList
+void cpp_torch_namespace__foreach_sinh__self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_sinh__self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensor >::type scalars(scalarsSEXP);
-    cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(self, tensor1, tensor2, scalars);
+    cpp_torch_namespace__foreach_sinh__self_TensorList(self);
     return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar
-void cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) {
+// cpp_torch_namespace__foreach_sqrt_self_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_sqrt_self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_sqrt_self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP);
-    cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(self, tensor1, tensor2, scalars);
-    return R_NilValue;
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_sqrt_self_TensorList(self));
+    return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor
-void cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) {
+// cpp_torch_namespace__foreach_sqrt__self_TensorList
+void cpp_torch_namespace__foreach_sqrt__self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_sqrt__self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensor >::type scalars(scalarsSEXP);
-    cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(self, tensor1, tensor2, scalars);
+    cpp_torch_namespace__foreach_sqrt__self_TensorList(self);
     return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList
-XPtrTorchTensorList cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP valueSEXP) {
-BEGIN_RCPP
-    Rcpp::RObject rcpp_result_gen;
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type value(valueSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList(self, tensor1, tensor2, value));
-    return rcpp_result_gen;
-END_RCPP
-}
-// cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList
-XPtrTorchTensorList cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP valueSEXP) {
+// cpp_torch_namespace__foreach_tan_self_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_tan_self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_tan_self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type value(valueSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList(self, tensor1, tensor2, value));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_tan_self_TensorList(self));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar
-XPtrTorchTensorList cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) {
+// cpp_torch_namespace__foreach_tan__self_TensorList
+void cpp_torch_namespace__foreach_tan__self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_tan__self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
-    Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(self, tensor1, tensor2, scalars));
-    return rcpp_result_gen;
+    cpp_torch_namespace__foreach_tan__self_TensorList(self);
+    return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor
-XPtrTorchTensorList cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) {
+// cpp_torch_namespace__foreach_tanh_self_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_tanh_self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_tanh_self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensor >::type scalars(scalarsSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(self, tensor1, tensor2, scalars));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_tanh_self_TensorList(self));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar
-XPtrTorchTensorList cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) {
+// cpp_torch_namespace__foreach_tanh__self_TensorList
+void cpp_torch_namespace__foreach_tanh__self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_tanh__self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
-    Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(self, tensor1, tensor2, scalars));
-    return rcpp_result_gen;
+    cpp_torch_namespace__foreach_tanh__self_TensorList(self);
+    return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor
-XPtrTorchTensorList cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) {
+// cpp_torch_namespace__foreach_trunc_self_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_trunc_self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_trunc_self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensor >::type scalars(scalarsSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(self, tensor1, tensor2, scalars));
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_trunc_self_TensorList(self));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_norm_self_TensorList
-XPtrTorchTensorList cpp_torch_namespace__foreach_norm_self_TensorList(XPtrTorchTensorList self, XPtrTorchScalar ord);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_norm_self_TensorList(SEXP selfSEXP, SEXP ordSEXP) {
+// cpp_torch_namespace__foreach_trunc__self_TensorList
+void cpp_torch_namespace__foreach_trunc__self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_trunc__self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
-    Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type ord(ordSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_norm_self_TensorList(self, ord));
-    return rcpp_result_gen;
+    cpp_torch_namespace__foreach_trunc__self_TensorList(self);
+    return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList
-XPtrTorchTensorList cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchTensorList weights);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList(SEXP selfSEXP, SEXP tensors1SEXP, SEXP weightsSEXP) {
+// cpp_torch_namespace__foreach_zero__self_TensorList
+void cpp_torch_namespace__foreach_zero__self_TensorList(XPtrTorchTensorList self);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_zero__self_TensorList(SEXP selfSEXP) {
 BEGIN_RCPP
-    Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensors1(tensors1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type weights(weightsSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList(self, tensors1, weights));
-    return rcpp_result_gen;
+    cpp_torch_namespace__foreach_zero__self_TensorList(self);
+    return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList
-void cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchTensorList weights);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList(SEXP selfSEXP, SEXP tensors1SEXP, SEXP weightsSEXP) {
+// cpp_torch_namespace__foreach_copy__self_TensorList_src_TensorList
+void cpp_torch_namespace__foreach_copy__self_TensorList_src_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList src, XPtrTorchbool non_blocking);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_copy__self_TensorList_src_TensorList(SEXP selfSEXP, SEXP srcSEXP, SEXP non_blockingSEXP) {
 BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensors1(tensors1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type weights(weightsSEXP);
-    cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList(self, tensors1, weights);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type src(srcSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type non_blocking(non_blockingSEXP);
+    cpp_torch_namespace__foreach_copy__self_TensorList_src_TensorList(self, src, non_blocking);
     return R_NilValue;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar
-XPtrTorchTensorList cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar(XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchScalar weight);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar(SEXP selfSEXP, SEXP tensors1SEXP, SEXP weightSEXP) {
+// cpp_torch_namespace__foreach_copy_self_TensorList_src_TensorList
+XPtrTorchTensorList cpp_torch_namespace__foreach_copy_self_TensorList_src_TensorList(XPtrTorchTensorList self, XPtrTorchTensorList src, XPtrTorchbool non_blocking);
+RcppExport SEXP _torch_cpp_torch_namespace__foreach_copy_self_TensorList_src_TensorList(SEXP selfSEXP, SEXP srcSEXP, SEXP non_blockingSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensors1(tensors1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type weight(weightSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar(self, tensors1, weight));
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type src(srcSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type non_blocking(non_blockingSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_copy_self_TensorList_src_TensorList(self, src, non_blocking));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar
-void cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar(XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchScalar weight);
-RcppExport SEXP _torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar(SEXP selfSEXP, SEXP tensors1SEXP, SEXP weightSEXP) {
-BEGIN_RCPP
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensors1(tensors1SEXP);
-    Rcpp::traits::input_parameter< XPtrTorchScalar >::type weight(weightSEXP);
-    cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar(self, tensors1, weight);
-    return R_NilValue;
-END_RCPP
-}
 // cpp_torch_namespace_bucketize_self_Tensor_boundaries_Tensor
 XPtrTorchTensor cpp_torch_namespace_bucketize_self_Tensor_boundaries_Tensor(XPtrTorchTensor self, XPtrTorchTensor boundaries, XPtrTorchbool out_int32, XPtrTorchbool right);
 RcppExport SEXP _torch_cpp_torch_namespace_bucketize_self_Tensor_boundaries_Tensor(SEXP selfSEXP, SEXP boundariesSEXP, SEXP out_int32SEXP, SEXP rightSEXP) {
@@ -28860,6 +30213,22 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar
+XPtrTorchTensor cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar(XPtrTorchTensor out, XPtrTorchTensor sorted_sequence, XPtrTorchScalar self, XPtrTorchbool out_int32, XPtrTorchbool right, XPtrTorchoptional_string_view side, XPtrTorchOptionalTensor sorter);
+RcppExport SEXP _torch_cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar(SEXP outSEXP, SEXP sorted_sequenceSEXP, SEXP selfSEXP, SEXP out_int32SEXP, SEXP rightSEXP, SEXP sideSEXP, SEXP sorterSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type sorted_sequence(sorted_sequenceSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchScalar >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type out_int32(out_int32SEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type right(rightSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_string_view >::type side(sideSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type sorter(sorterSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar(out, sorted_sequence, self, out_int32, right, side, sorter));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace__convert_indices_from_coo_to_csr_self_Tensor_size_int64_t
 XPtrTorchTensor cpp_torch_namespace__convert_indices_from_coo_to_csr_self_Tensor_size_int64_t(XPtrTorchTensor self, XPtrTorchint64_t size, XPtrTorchbool out_int32);
 RcppExport SEXP _torch_cpp_torch_namespace__convert_indices_from_coo_to_csr_self_Tensor_size_int64_t(SEXP selfSEXP, SEXP sizeSEXP, SEXP out_int32SEXP) {
@@ -34329,6 +35698,16 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__linalg_eigvals_self_Tensor
+XPtrTorchTensor cpp_torch_namespace__linalg_eigvals_self_Tensor(XPtrTorchTensor self);
+RcppExport SEXP _torch_cpp_torch_namespace__linalg_eigvals_self_Tensor(SEXP selfSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__linalg_eigvals_self_Tensor(self));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_linalg_eigvals_self_Tensor
 XPtrTorchTensor cpp_torch_namespace_linalg_eigvals_self_Tensor(XPtrTorchTensor self);
 RcppExport SEXP _torch_cpp_torch_namespace_linalg_eigvals_self_Tensor(SEXP selfSEXP) {
@@ -35025,6 +36404,18 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__spsolve_A_Tensor_B_Tensor
+XPtrTorchTensor cpp_torch_namespace__spsolve_A_Tensor_B_Tensor(XPtrTorchTensor A, XPtrTorchTensor B, XPtrTorchbool left);
+RcppExport SEXP _torch_cpp_torch_namespace__spsolve_A_Tensor_B_Tensor(SEXP ASEXP, SEXP BSEXP, SEXP leftSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type A(ASEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type B(BSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type left(leftSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__spsolve_A_Tensor_B_Tensor(A, B, left));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace_linalg_solve_out_out_Tensor_A_Tensor_B_Tensor
 XPtrTorchTensor cpp_torch_namespace_linalg_solve_out_out_Tensor_A_Tensor_B_Tensor(XPtrTorchTensor out, XPtrTorchTensor A, XPtrTorchTensor B, XPtrTorchbool left);
 RcppExport SEXP _torch_cpp_torch_namespace_linalg_solve_out_out_Tensor_A_Tensor_B_Tensor(SEXP outSEXP, SEXP ASEXP, SEXP BSEXP, SEXP leftSEXP) {
@@ -35282,6 +36673,18 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__test_parallel_materialize_self_Tensor_num_parallel_int64_t
+XPtrTorchTensor cpp_torch_namespace__test_parallel_materialize_self_Tensor_num_parallel_int64_t(XPtrTorchTensor self, XPtrTorchint64_t num_parallel, XPtrTorchbool skip_first);
+RcppExport SEXP _torch_cpp_torch_namespace__test_parallel_materialize_self_Tensor_num_parallel_int64_t(SEXP selfSEXP, SEXP num_parallelSEXP, SEXP skip_firstSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchint64_t >::type num_parallel(num_parallelSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type skip_first(skip_firstSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__test_parallel_materialize_self_Tensor_num_parallel_int64_t(self, num_parallel, skip_first));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace__test_optional_intlist_values_Tensor_addends_IntArrayRef
 XPtrTorchTensor cpp_torch_namespace__test_optional_intlist_values_Tensor_addends_IntArrayRef(XPtrTorchTensor values, XPtrTorchOptionalIntArrayRef addends);
 RcppExport SEXP _torch_cpp_torch_namespace__test_optional_intlist_values_Tensor_addends_IntArrayRef(SEXP valuesSEXP, SEXP addendsSEXP) {
@@ -35401,14 +36804,15 @@ BEGIN_RCPP
 END_RCPP
 }
 // cpp_torch_namespace_pad_sequence_sequences_TensorList
-XPtrTorchTensor cpp_torch_namespace_pad_sequence_sequences_TensorList(XPtrTorchTensorList sequences, XPtrTorchbool batch_first, XPtrTorchdouble padding_value);
-RcppExport SEXP _torch_cpp_torch_namespace_pad_sequence_sequences_TensorList(SEXP sequencesSEXP, SEXP batch_firstSEXP, SEXP padding_valueSEXP) {
+XPtrTorchTensor cpp_torch_namespace_pad_sequence_sequences_TensorList(XPtrTorchTensorList sequences, XPtrTorchbool batch_first, XPtrTorchdouble padding_value, XPtrTorchstring_view padding_side);
+RcppExport SEXP _torch_cpp_torch_namespace_pad_sequence_sequences_TensorList(SEXP sequencesSEXP, SEXP batch_firstSEXP, SEXP padding_valueSEXP, SEXP padding_sideSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensorList >::type sequences(sequencesSEXP);
     Rcpp::traits::input_parameter< XPtrTorchbool >::type batch_first(batch_firstSEXP);
     Rcpp::traits::input_parameter< XPtrTorchdouble >::type padding_value(padding_valueSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_pad_sequence_sequences_TensorList(sequences, batch_first, padding_value));
+    Rcpp::traits::input_parameter< XPtrTorchstring_view >::type padding_side(padding_sideSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_pad_sequence_sequences_TensorList(sequences, batch_first, padding_value, padding_side));
     return rcpp_result_gen;
 END_RCPP
 }
@@ -35878,6 +37282,31 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__jagged_to_padded_dense_forward_values_Tensor_offsets_TensorList_max_lengths_IntArrayRef
+XPtrTorchTensor cpp_torch_namespace__jagged_to_padded_dense_forward_values_Tensor_offsets_TensorList_max_lengths_IntArrayRef(XPtrTorchTensor values, XPtrTorchTensorList offsets, XPtrTorchIntArrayRef max_lengths, XPtrTorchdouble padding_value);
+RcppExport SEXP _torch_cpp_torch_namespace__jagged_to_padded_dense_forward_values_Tensor_offsets_TensorList_max_lengths_IntArrayRef(SEXP valuesSEXP, SEXP offsetsSEXP, SEXP max_lengthsSEXP, SEXP padding_valueSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type offsets(offsetsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type max_lengths(max_lengthsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type padding_value(padding_valueSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__jagged_to_padded_dense_forward_values_Tensor_offsets_TensorList_max_lengths_IntArrayRef(values, offsets, max_lengths, padding_value));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__padded_dense_to_jagged_forward_dense_Tensor_offsets_TensorList
+XPtrTorchTensor cpp_torch_namespace__padded_dense_to_jagged_forward_dense_Tensor_offsets_TensorList(XPtrTorchTensor dense, XPtrTorchTensorList offsets, XPtrTorchoptional_int64_t total_L);
+RcppExport SEXP _torch_cpp_torch_namespace__padded_dense_to_jagged_forward_dense_Tensor_offsets_TensorList(SEXP denseSEXP, SEXP offsetsSEXP, SEXP total_LSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type dense(denseSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type offsets(offsetsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type total_L(total_LSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__padded_dense_to_jagged_forward_dense_Tensor_offsets_TensorList(dense, offsets, total_L));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace__nested_tensor_softmax_with_shape_self_Tensor_query_Tensor
 XPtrTorchTensor cpp_torch_namespace__nested_tensor_softmax_with_shape_self_Tensor_query_Tensor(XPtrTorchTensor self, XPtrTorchTensor query);
 RcppExport SEXP _torch_cpp_torch_namespace__nested_tensor_softmax_with_shape_self_Tensor_query_Tensor(SEXP selfSEXP, SEXP querySEXP) {
@@ -35889,6 +37318,18 @@ BEGIN_RCPP
     return rcpp_result_gen;
 END_RCPP
 }
+// cpp_torch_namespace__safe_softmax_self_Tensor_dim_int64_t
+XPtrTorchTensor cpp_torch_namespace__safe_softmax_self_Tensor_dim_int64_t(XPtrTorchTensor self, XPtrTorchindex_int64_t dim, XPtrTorchoptional_scalar_type dtype);
+RcppExport SEXP _torch_cpp_torch_namespace__safe_softmax_self_Tensor_dim_int64_t(SEXP selfSEXP, SEXP dimSEXP, SEXP dtypeSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchindex_int64_t >::type dim(dimSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchoptional_scalar_type >::type dtype(dtypeSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__safe_softmax_self_Tensor_dim_int64_t(self, dim, dtype));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_namespace__transformer_encoder_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor
 XPtrTorchTensor cpp_torch_namespace__transformer_encoder_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor(XPtrTorchTensor src, XPtrTorchint64_t embed_dim, XPtrTorchint64_t num_heads, XPtrTorchTensor qkv_weight, XPtrTorchTensor qkv_bias, XPtrTorchTensor proj_weight, XPtrTorchTensor proj_bias, XPtrTorchbool use_gelu, XPtrTorchbool norm_first, XPtrTorchdouble eps, XPtrTorchTensor norm_weight_1, XPtrTorchTensor norm_bias_1, XPtrTorchTensor norm_weight_2, XPtrTorchTensor norm_bias_2, XPtrTorchTensor ffn_weight_1, XPtrTorchTensor ffn_bias_1, XPtrTorchTensor ffn_weight_2, XPtrTorchTensor ffn_bias_2, XPtrTorchOptionalTensor mask, XPtrTorchoptional_int64_t mask_type);
 RcppExport SEXP _torch_cpp_torch_namespace__transformer_encoder_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor(SEXP srcSEXP, SEXP embed_dimSEXP, SEXP num_headsSEXP, SEXP qkv_weightSEXP, SEXP qkv_biasSEXP, SEXP proj_weightSEXP, SEXP proj_biasSEXP, SEXP use_geluSEXP, SEXP norm_firstSEXP, SEXP epsSEXP, SEXP norm_weight_1SEXP, SEXP norm_bias_1SEXP, SEXP norm_weight_2SEXP, SEXP norm_bias_2SEXP, SEXP ffn_weight_1SEXP, SEXP ffn_bias_1SEXP, SEXP ffn_weight_2SEXP, SEXP ffn_bias_2SEXP, SEXP maskSEXP, SEXP mask_typeSEXP) {
@@ -35941,8 +37382,8 @@ BEGIN_RCPP
 END_RCPP
 }
 // cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor
-XPtrTorchTensor cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal);
-RcppExport SEXP _torch_cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP attn_maskSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP) {
+XPtrTorchTensor cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionaldouble scale, XPtrTorchbool enable_gqa);
+RcppExport SEXP _torch_cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP attn_maskSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP scaleSEXP, SEXP enable_gqaSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP);
@@ -35951,13 +37392,15 @@ BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type attn_mask(attn_maskSEXP);
     Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP);
     Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor(query, key, value, attn_mask, dropout_p, is_causal));
+    Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type enable_gqa(enable_gqaSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor
-Rcpp::List cpp_torch_namespace__scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool need_attn_weights, XPtrTorchbool is_causal);
-RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP attn_maskSEXP, SEXP dropout_pSEXP, SEXP need_attn_weightsSEXP, SEXP is_causalSEXP) {
+// cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor
+XPtrTorchint64_t cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionaldouble scale, XPtrTorchbool enable_gqa);
+RcppExport SEXP _torch_cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP attn_maskSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP scaleSEXP, SEXP enable_gqaSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP);
@@ -35965,15 +37408,16 @@ BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type value(valueSEXP);
     Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type attn_mask(attn_maskSEXP);
     Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP);
-    Rcpp::traits::input_parameter< XPtrTorchbool >::type need_attn_weights(need_attn_weightsSEXP);
     Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor(query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal));
+    Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type enable_gqa(enable_gqaSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor
-XPtrTorchint64_t cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal);
-RcppExport SEXP _torch_cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP attn_maskSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP) {
+// cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor
+Rcpp::List cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionalTensor dropout_mask, XPtrTorchOptionaldouble scale, XPtrTorchbool enable_gqa);
+RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP attn_maskSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP dropout_maskSEXP, SEXP scaleSEXP, SEXP enable_gqaSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP);
@@ -35982,13 +37426,16 @@ BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type attn_mask(attn_maskSEXP);
     Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP);
     Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor(query, key, value, attn_mask, dropout_p, is_causal));
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type dropout_mask(dropout_maskSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type enable_gqa(enable_gqaSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale, enable_gqa));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor
-Rcpp::List cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionalTensor dropout_mask);
-RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP attn_maskSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP dropout_maskSEXP) {
+// cpp_torch_namespace__scaled_dot_product_attention_math_for_mps_query_Tensor_key_Tensor_value_Tensor
+Rcpp::List cpp_torch_namespace__scaled_dot_product_attention_math_for_mps_query_Tensor_key_Tensor_value_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionalTensor dropout_mask, XPtrTorchOptionaldouble scale);
+RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_attention_math_for_mps_query_Tensor_key_Tensor_value_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP attn_maskSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP dropout_maskSEXP, SEXP scaleSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP);
@@ -35998,13 +37445,14 @@ BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP);
     Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP);
     Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type dropout_mask(dropout_maskSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask));
+    Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_attention_math_for_mps_query_Tensor_key_Tensor_value_Tensor(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale));
     return rcpp_result_gen;
 END_RCPP
 }
 // cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor
-Rcpp::List cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchbool return_debug_mask);
-RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP return_debug_maskSEXP) {
+Rcpp::List cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchbool return_debug_mask, XPtrTorchOptionaldouble scale);
+RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP return_debug_maskSEXP, SEXP scaleSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP);
@@ -36013,13 +37461,47 @@ BEGIN_RCPP
     Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP);
     Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP);
     Rcpp::traits::input_parameter< XPtrTorchbool >::type return_debug_mask(return_debug_maskSEXP);
-    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor(query, key, value, dropout_p, is_causal, return_debug_mask));
+    Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor(query, key, value, dropout_p, is_causal, return_debug_mask, scale));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_query_Tensor_key_Tensor_value_Tensor
+Rcpp::List cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_query_Tensor_key_Tensor_value_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionalTensor attn_mask, XPtrTorchOptionaldouble scale);
+RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_query_Tensor_key_Tensor_value_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP attn_maskSEXP, SEXP scaleSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type key(keySEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type value(valueSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type attn_mask(attn_maskSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_query_Tensor_key_Tensor_value_Tensor(query, key, value, dropout_p, is_causal, attn_mask, scale));
     return rcpp_result_gen;
 END_RCPP
 }
-// cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t
-Rcpp::List cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t(XPtrTorchTensor grad_out, XPtrTorchTensor query,
XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchTensor cum_seq_q, XPtrTorchTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchint64_t philox_seed, XPtrTorchint64_t philox_offset); -RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t(SEXP grad_outSEXP, SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP outSEXP, SEXP logsumexpSEXP, SEXP cum_seq_qSEXP, SEXP cum_seq_kSEXP, SEXP max_qSEXP, SEXP max_kSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP philox_seedSEXP, SEXP philox_offsetSEXP) { +// cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_query_Tensor_key_Tensor_value_Tensor +Rcpp::List cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_query_Tensor_key_Tensor_value_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_bias, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchbool return_debug_mask, XPtrTorchOptionaldouble scale); +RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_query_Tensor_key_Tensor_value_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP attn_biasSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP return_debug_maskSEXP, SEXP scaleSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type key(keySEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type value(valueSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type attn_bias(attn_biasSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type return_debug_mask(return_debug_maskSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_query_Tensor_key_Tensor_value_Tensor(query, key, value, attn_bias, dropout_p, is_causal, return_debug_mask, scale)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor +Rcpp::List cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor(XPtrTorchTensor grad_out, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchTensor cum_seq_q, XPtrTorchTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchTensor philox_seed, XPtrTorchTensor philox_offset, XPtrTorchOptionaldouble scale); +RcppExport SEXP 
_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor(SEXP grad_outSEXP, SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP outSEXP, SEXP logsumexpSEXP, SEXP cum_seq_qSEXP, SEXP cum_seq_kSEXP, SEXP max_qSEXP, SEXP max_kSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP philox_seedSEXP, SEXP philox_offsetSEXP, SEXP scaleSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type grad_out(grad_outSEXP); @@ -36034,78 +37516,167 @@ BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchint64_t >::type max_k(max_kSEXP); Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP); Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP); - Rcpp::traits::input_parameter< XPtrTorchint64_t >::type philox_seed(philox_seedSEXP); - Rcpp::traits::input_parameter< XPtrTorchint64_t >::type philox_offset(philox_offsetSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset)); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type philox_seed(philox_seedSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type philox_offset(philox_offsetSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_compute_log_sumexp_bool -Rcpp::List cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_compute_log_sumexp_bool(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchbool compute_log_sumexp, XPtrTorchbool is_causal); -RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_compute_log_sumexp_bool(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP compute_log_sumexpSEXP, SEXP is_causalSEXP) { +// cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_dropout_p_double_is_causal_bool +Rcpp::List cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_dropout_p_double_is_causal_bool(XPtrTorchTensor grad_out, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, 
XPtrTorchOptionalTensor attn_mask, XPtrTorchOptionaldouble scale); +RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_dropout_p_double_is_causal_bool(SEXP grad_outSEXP, SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP outSEXP, SEXP logsumexpSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP attn_maskSEXP, SEXP scaleSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type grad_out(grad_outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type key(keySEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type value(valueSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type logsumexp(logsumexpSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type attn_mask(attn_maskSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_dropout_p_double_is_causal_bool(grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask, scale)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_grad_input_mask_stdarraybool4_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor +Rcpp::List cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_grad_input_mask_stdarraybool4_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor(XPtrTorchTensor grad_out, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor attn_bias, std::vector grad_input_mask, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchTensor cum_seq_q, XPtrTorchTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchTensor philox_seed, XPtrTorchTensor philox_offset, XPtrTorchOptionaldouble scale); +RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_grad_input_mask_stdarraybool4_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor(SEXP grad_outSEXP, SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP attn_biasSEXP, SEXP grad_input_maskSEXP, SEXP outSEXP, SEXP logsumexpSEXP, SEXP cum_seq_qSEXP, SEXP cum_seq_kSEXP, SEXP max_qSEXP, SEXP max_kSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP philox_seedSEXP, SEXP philox_offsetSEXP, SEXP scaleSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type 
grad_out(grad_outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type key(keySEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type value(valueSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type attn_bias(attn_biasSEXP); + Rcpp::traits::input_parameter< std::vector >::type grad_input_mask(grad_input_maskSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type logsumexp(logsumexpSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type cum_seq_q(cum_seq_qSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type cum_seq_k(cum_seq_kSEXP); + Rcpp::traits::input_parameter< XPtrTorchint64_t >::type max_q(max_qSEXP); + Rcpp::traits::input_parameter< XPtrTorchint64_t >::type max_k(max_kSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type philox_seed(philox_seedSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type philox_offset(philox_offsetSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_grad_input_mask_stdarraybool4_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor(grad_out, query, key, value, attn_bias, grad_input_mask, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool +Rcpp::List cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_bias, XPtrTorchbool compute_log_sumexp, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionaldouble scale); +RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP attn_biasSEXP, SEXP compute_log_sumexpSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP scaleSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type key(keySEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type value(valueSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type attn_bias(attn_biasSEXP); Rcpp::traits::input_parameter< XPtrTorchbool >::type compute_log_sumexp(compute_log_sumexpSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP); Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_compute_log_sumexp_bool(query, key, value, compute_log_sumexp, is_causal)); + 
Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, scale)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor -Rcpp::List cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor(XPtrTorchTensor grad_out_, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchbool is_causal, XPtrTorchbool chunk_grad_outputs); -RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor(SEXP grad_out_SEXP, SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP outSEXP, SEXP logsumexpSEXP, SEXP is_causalSEXP, SEXP chunk_grad_outputsSEXP) { +// cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_dropout_p_double_grad_input_mask_stdarraybool4 +Rcpp::List cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_dropout_p_double_grad_input_mask_stdarraybool4(XPtrTorchTensor grad_out_, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor attn_bias, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchTensor philox_seed, XPtrTorchTensor philox_offset, XPtrTorchdouble dropout_p, std::vector grad_input_mask, XPtrTorchbool is_causal, XPtrTorchOptionaldouble scale); +RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_dropout_p_double_grad_input_mask_stdarraybool4(SEXP grad_out_SEXP, SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP attn_biasSEXP, SEXP outSEXP, SEXP logsumexpSEXP, SEXP philox_seedSEXP, SEXP philox_offsetSEXP, SEXP dropout_pSEXP, SEXP grad_input_maskSEXP, SEXP is_causalSEXP, SEXP scaleSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type grad_out_(grad_out_SEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type key(keySEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type value(valueSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type attn_bias(attn_biasSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type logsumexp(logsumexpSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type philox_seed(philox_seedSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type philox_offset(philox_offsetSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP); + Rcpp::traits::input_parameter< std::vector >::type grad_input_mask(grad_input_maskSEXP); 
Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type chunk_grad_outputs(chunk_grad_outputsSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs)); + Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_dropout_p_double_grad_input_mask_stdarraybool4(grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal, scale)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__chunk_grad_outputs_efficient_attention_query_Tensor_key_Tensor_value_Tensor -XPtrTorchbool cpp_torch_namespace__chunk_grad_outputs_efficient_attention_query_Tensor_key_Tensor_value_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchbool is_causal); -RcppExport SEXP _torch_cpp_torch_namespace__chunk_grad_outputs_efficient_attention_query_Tensor_key_Tensor_value_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP is_causalSEXP) { +// cpp_torch_namespace__scaled_dot_product_cudnn_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool +Rcpp::List cpp_torch_namespace__scaled_dot_product_cudnn_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_bias, XPtrTorchbool compute_log_sumexp, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchbool return_debug_mask, XPtrTorchOptionaldouble scale); +RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_cudnn_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP attn_biasSEXP, SEXP compute_log_sumexpSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP return_debug_maskSEXP, SEXP scaleSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type key(keySEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type value(valueSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type attn_bias(attn_biasSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type compute_log_sumexp(compute_log_sumexpSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP); Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__chunk_grad_outputs_efficient_attention_query_Tensor_key_Tensor_value_Tensor(query, key, value, is_causal)); + Rcpp::traits::input_parameter< XPtrTorchbool >::type return_debug_mask(return_debug_maskSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_cudnn_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, 
scale)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool -Rcpp::List cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor cum_seq_q, XPtrTorchTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchbool return_debug_mask); -RcppExport SEXP _torch_cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP cum_seq_qSEXP, SEXP cum_seq_kSEXP, SEXP max_qSEXP, SEXP max_kSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP return_debug_maskSEXP) { +// cpp_torch_namespace__scaled_dot_product_cudnn_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_attn_bias_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool +Rcpp::List cpp_torch_namespace__scaled_dot_product_cudnn_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_attn_bias_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool(XPtrTorchTensor grad_out, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchTensor philox_seed, XPtrTorchTensor philox_offset, XPtrTorchTensor attn_bias, XPtrTorchTensor cum_seq_q, XPtrTorchTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionaldouble scale); +RcppExport SEXP _torch_cpp_torch_namespace__scaled_dot_product_cudnn_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_attn_bias_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool(SEXP grad_outSEXP, SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP outSEXP, SEXP logsumexpSEXP, SEXP philox_seedSEXP, SEXP philox_offsetSEXP, SEXP attn_biasSEXP, SEXP cum_seq_qSEXP, SEXP cum_seq_kSEXP, SEXP max_qSEXP, SEXP max_kSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP scaleSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type grad_out(grad_outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type key(keySEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type value(valueSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type logsumexp(logsumexpSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type philox_seed(philox_seedSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type philox_offset(philox_offsetSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type attn_bias(attn_biasSEXP); Rcpp::traits::input_parameter< 
XPtrTorchTensor >::type cum_seq_q(cum_seq_qSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type cum_seq_k(cum_seq_kSEXP); Rcpp::traits::input_parameter< XPtrTorchint64_t >::type max_q(max_qSEXP); Rcpp::traits::input_parameter< XPtrTorchint64_t >::type max_k(max_kSEXP); Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP); Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__scaled_dot_product_cudnn_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_attn_bias_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool(grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool +Rcpp::List cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor cum_seq_q, XPtrTorchOptionalTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchbool return_debug_mask, XPtrTorchOptionaldouble scale, XPtrTorchoptional_int64_t window_size_left, XPtrTorchoptional_int64_t window_size_right, XPtrTorchOptionalTensor seqused_k, XPtrTorchOptionalTensor alibi_slopes); +RcppExport SEXP _torch_cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP cum_seq_qSEXP, SEXP cum_seq_kSEXP, SEXP max_qSEXP, SEXP max_kSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP return_debug_maskSEXP, SEXP scaleSEXP, SEXP window_size_leftSEXP, SEXP window_size_rightSEXP, SEXP seqused_kSEXP, SEXP alibi_slopesSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type key(keySEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type value(valueSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type cum_seq_q(cum_seq_qSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type cum_seq_k(cum_seq_kSEXP); + Rcpp::traits::input_parameter< XPtrTorchint64_t >::type max_q(max_qSEXP); + Rcpp::traits::input_parameter< XPtrTorchint64_t >::type max_k(max_kSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP); Rcpp::traits::input_parameter< XPtrTorchbool >::type return_debug_mask(return_debug_maskSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool(query, key, value, 
cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask)); + Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP); + Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type window_size_left(window_size_leftSEXP); + Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type window_size_right(window_size_rightSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type seqused_k(seqused_kSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type alibi_slopes(alibi_slopesSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale, window_size_left, window_size_right, seqused_k, alibi_slopes)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t -Rcpp::List cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t(XPtrTorchTensor grad_out, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchTensor cum_seq_q, XPtrTorchTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchint64_t philox_seed, XPtrTorchint64_t philox_offset); -RcppExport SEXP _torch_cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t(SEXP grad_outSEXP, SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP outSEXP, SEXP logsumexpSEXP, SEXP cum_seq_qSEXP, SEXP cum_seq_kSEXP, SEXP max_qSEXP, SEXP max_kSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP philox_seedSEXP, SEXP philox_offsetSEXP) { +// cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor +Rcpp::List cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor(XPtrTorchTensor grad_out, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchTensor cum_seq_q, XPtrTorchTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchTensor philox_seed, XPtrTorchTensor philox_offset, XPtrTorchOptionaldouble scale, XPtrTorchoptional_int64_t window_size_left, XPtrTorchoptional_int64_t window_size_right); +RcppExport SEXP 
_torch_cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor(SEXP grad_outSEXP, SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP outSEXP, SEXP logsumexpSEXP, SEXP cum_seq_qSEXP, SEXP cum_seq_kSEXP, SEXP max_qSEXP, SEXP max_kSEXP, SEXP dropout_pSEXP, SEXP is_causalSEXP, SEXP philox_seedSEXP, SEXP philox_offsetSEXP, SEXP scaleSEXP, SEXP window_size_leftSEXP, SEXP window_size_rightSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type grad_out(grad_outSEXP); @@ -36120,43 +37691,64 @@ BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchint64_t >::type max_k(max_kSEXP); Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP); Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP); - Rcpp::traits::input_parameter< XPtrTorchint64_t >::type philox_seed(philox_seedSEXP); - Rcpp::traits::input_parameter< XPtrTorchint64_t >::type philox_offset(philox_offsetSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset)); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type philox_seed(philox_seedSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type philox_offset(philox_offsetSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP); + Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type window_size_left(window_size_leftSEXP); + Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type window_size_right(window_size_rightSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale, window_size_left, window_size_right)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t -Rcpp::List cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor cu_seqlens_q, XPtrTorchOptionalTensor cu_seqlens_k, XPtrTorchoptional_int64_t max_seqlen_q, XPtrTorchbool compute_log_sumexp, XPtrTorchbool causal); -RcppExport SEXP _torch_cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP cu_seqlens_qSEXP, SEXP cu_seqlens_kSEXP, SEXP max_seqlen_qSEXP, SEXP compute_log_sumexpSEXP, SEXP causalSEXP) { +// 
cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_dropout_p_double_custom_mask_type_int64_t +Rcpp::List cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_dropout_p_double_custom_mask_type_int64_t(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor bias, XPtrTorchOptionalTensor cu_seqlens_q, XPtrTorchOptionalTensor cu_seqlens_k, XPtrTorchoptional_int64_t max_seqlen_q, XPtrTorchoptional_int64_t max_seqlen_k, XPtrTorchdouble dropout_p, XPtrTorchint64_t custom_mask_type, XPtrTorchbool compute_log_sumexp, XPtrTorchOptionaldouble scale, XPtrTorchOptionalTensor seqlen_k, XPtrTorchoptional_int64_t window_size); +RcppExport SEXP _torch_cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_dropout_p_double_custom_mask_type_int64_t(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP biasSEXP, SEXP cu_seqlens_qSEXP, SEXP cu_seqlens_kSEXP, SEXP max_seqlen_qSEXP, SEXP max_seqlen_kSEXP, SEXP dropout_pSEXP, SEXP custom_mask_typeSEXP, SEXP compute_log_sumexpSEXP, SEXP scaleSEXP, SEXP seqlen_kSEXP, SEXP window_sizeSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type key(keySEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type value(valueSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP); Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type cu_seqlens_q(cu_seqlens_qSEXP); Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type cu_seqlens_k(cu_seqlens_kSEXP); Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type max_seqlen_q(max_seqlen_qSEXP); + Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type max_seqlen_k(max_seqlen_kSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP); + Rcpp::traits::input_parameter< XPtrTorchint64_t >::type custom_mask_type(custom_mask_typeSEXP); Rcpp::traits::input_parameter< XPtrTorchbool >::type compute_log_sumexp(compute_log_sumexpSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type causal(causalSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t(query, key, value, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, compute_log_sumexp, causal)); + Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type seqlen_k(seqlen_kSEXP); + Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type window_size(window_sizeSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_dropout_p_double_custom_mask_type_int64_t(query, key, value, bias, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, custom_mask_type, compute_log_sumexp, scale, seqlen_k, window_size)); return rcpp_result_gen; END_RCPP } -// 
cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor -Rcpp::List cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor(XPtrTorchTensor grad_out_, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchbool is_causal, XPtrTorchbool chunk_grad_outputs); -RcppExport SEXP _torch_cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor(SEXP grad_out_SEXP, SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP outSEXP, SEXP logsumexpSEXP, SEXP is_causalSEXP, SEXP chunk_grad_outputsSEXP) { +// cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_out_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_logsumexp_Tensor_dropout_p_double_philox_seed_Tensor_philox_offset_Tensor_custom_mask_type_int64_t_bias_requires_grad_bool +Rcpp::List cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_out_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_logsumexp_Tensor_dropout_p_double_philox_seed_Tensor_philox_offset_Tensor_custom_mask_type_int64_t_bias_requires_grad_bool(XPtrTorchTensor grad_out_, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor bias, XPtrTorchTensor out, XPtrTorchOptionalTensor cu_seqlens_q, XPtrTorchOptionalTensor cu_seqlens_k, XPtrTorchint64_t max_seqlen_q, XPtrTorchint64_t max_seqlen_k, XPtrTorchTensor logsumexp, XPtrTorchdouble dropout_p, XPtrTorchTensor philox_seed, XPtrTorchTensor philox_offset, XPtrTorchint64_t custom_mask_type, XPtrTorchbool bias_requires_grad, XPtrTorchOptionaldouble scale, XPtrTorchoptional_int64_t num_splits_key, XPtrTorchoptional_int64_t window_size, XPtrTorchbool shared_storage_dqdkdv); +RcppExport SEXP _torch_cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_out_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_logsumexp_Tensor_dropout_p_double_philox_seed_Tensor_philox_offset_Tensor_custom_mask_type_int64_t_bias_requires_grad_bool(SEXP grad_out_SEXP, SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP biasSEXP, SEXP outSEXP, SEXP cu_seqlens_qSEXP, SEXP cu_seqlens_kSEXP, SEXP max_seqlen_qSEXP, SEXP max_seqlen_kSEXP, SEXP logsumexpSEXP, SEXP dropout_pSEXP, SEXP philox_seedSEXP, SEXP philox_offsetSEXP, SEXP custom_mask_typeSEXP, SEXP bias_requires_gradSEXP, SEXP scaleSEXP, SEXP num_splits_keySEXP, SEXP window_sizeSEXP, SEXP shared_storage_dqdkdvSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type grad_out_(grad_out_SEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type key(keySEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type value(valueSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type cu_seqlens_q(cu_seqlens_qSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type cu_seqlens_k(cu_seqlens_kSEXP); + 
Rcpp::traits::input_parameter< XPtrTorchint64_t >::type max_seqlen_q(max_seqlen_qSEXP); + Rcpp::traits::input_parameter< XPtrTorchint64_t >::type max_seqlen_k(max_seqlen_kSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type logsumexp(logsumexpSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type is_causal(is_causalSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type chunk_grad_outputs(chunk_grad_outputsSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs)); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type philox_seed(philox_seedSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type philox_offset(philox_offsetSEXP); + Rcpp::traits::input_parameter< XPtrTorchint64_t >::type custom_mask_type(custom_mask_typeSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type bias_requires_grad(bias_requires_gradSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionaldouble >::type scale(scaleSEXP); + Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type num_splits_key(num_splits_keySEXP); + Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type window_size(window_sizeSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type shared_storage_dqdkdv(shared_storage_dqdkdvSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_out_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_logsumexp_Tensor_dropout_p_double_philox_seed_Tensor_philox_offset_Tensor_custom_mask_type_int64_t_bias_requires_grad_bool(grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key, window_size, shared_storage_dqdkdv)); return rcpp_result_gen; END_RCPP } @@ -36173,6 +37765,19 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// cpp_torch_namespace__fill_mem_eff_dropout_mask__self_Tensor_dropout_p_double_seed_int64_t_offset_int64_t +XPtrTorchTensor cpp_torch_namespace__fill_mem_eff_dropout_mask__self_Tensor_dropout_p_double_seed_int64_t_offset_int64_t(XPtrTorchTensor self, XPtrTorchdouble dropout_p, XPtrTorchint64_t seed, XPtrTorchint64_t offset); +RcppExport SEXP _torch_cpp_torch_namespace__fill_mem_eff_dropout_mask__self_Tensor_dropout_p_double_seed_int64_t_offset_int64_t(SEXP selfSEXP, SEXP dropout_pSEXP, SEXP seedSEXP, SEXP offsetSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type dropout_p(dropout_pSEXP); + Rcpp::traits::input_parameter< XPtrTorchint64_t >::type seed(seedSEXP); + Rcpp::traits::input_parameter< XPtrTorchint64_t >::type offset(offsetSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__fill_mem_eff_dropout_mask__self_Tensor_dropout_p_double_seed_int64_t_offset_int64_t(self, dropout_p, seed, offset)); + return rcpp_result_gen; +END_RCPP +} // cpp_torch_namespace__triton_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor XPtrTorchTensor 
cpp_torch_namespace__triton_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchint64_t embed_dim, XPtrTorchint64_t num_head, XPtrTorchTensor qkv_weight, XPtrTorchTensor qkv_bias, XPtrTorchTensor proj_weight, XPtrTorchTensor proj_bias, XPtrTorchOptionalTensor mask); RcppExport SEXP _torch_cpp_torch_namespace__triton_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP embed_dimSEXP, SEXP num_headSEXP, SEXP qkv_weightSEXP, SEXP qkv_biasSEXP, SEXP proj_weightSEXP, SEXP proj_biasSEXP, SEXP maskSEXP) { @@ -36213,59 +37818,6 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__transformer_decoder_only_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor -Rcpp::List cpp_torch_namespace__transformer_decoder_only_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor(XPtrTorchTensor src, XPtrTorchint64_t embed_dim, XPtrTorchint64_t num_heads, XPtrTorchTensor qkv_weight, XPtrTorchTensor qkv_bias, XPtrTorchTensor proj_weight, XPtrTorchTensor proj_bias, XPtrTorchbool use_gelu, XPtrTorchbool norm_first, XPtrTorchdouble eps, XPtrTorchTensor norm_weight_1, XPtrTorchTensor norm_bias_1, XPtrTorchTensor norm_weight_2, XPtrTorchTensor norm_bias_2, XPtrTorchTensor ffn_weight_1, XPtrTorchTensor ffn_bias_1, XPtrTorchTensor ffn_weight_2, XPtrTorchTensor ffn_bias_2, XPtrTorchOptionalTensor mask, XPtrTorchOptionalTensor incr_key, XPtrTorchOptionalTensor incr_value); -RcppExport SEXP _torch_cpp_torch_namespace__transformer_decoder_only_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor(SEXP srcSEXP, SEXP embed_dimSEXP, SEXP num_headsSEXP, SEXP qkv_weightSEXP, SEXP qkv_biasSEXP, SEXP proj_weightSEXP, SEXP proj_biasSEXP, SEXP use_geluSEXP, SEXP norm_firstSEXP, SEXP epsSEXP, SEXP norm_weight_1SEXP, SEXP norm_bias_1SEXP, SEXP norm_weight_2SEXP, SEXP norm_bias_2SEXP, SEXP ffn_weight_1SEXP, SEXP ffn_bias_1SEXP, SEXP ffn_weight_2SEXP, SEXP ffn_bias_2SEXP, SEXP maskSEXP, SEXP incr_keySEXP, SEXP incr_valueSEXP) { -BEGIN_RCPP - Rcpp::RObject rcpp_result_gen; - Rcpp::traits::input_parameter< XPtrTorchTensor >::type src(srcSEXP); - Rcpp::traits::input_parameter< XPtrTorchint64_t >::type embed_dim(embed_dimSEXP); - Rcpp::traits::input_parameter< XPtrTorchint64_t >::type num_heads(num_headsSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type qkv_weight(qkv_weightSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type qkv_bias(qkv_biasSEXP); - 
Rcpp::traits::input_parameter< XPtrTorchTensor >::type proj_weight(proj_weightSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type proj_bias(proj_biasSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type use_gelu(use_geluSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type norm_first(norm_firstSEXP); - Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type norm_weight_1(norm_weight_1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type norm_bias_1(norm_bias_1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type norm_weight_2(norm_weight_2SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type norm_bias_2(norm_bias_2SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type ffn_weight_1(ffn_weight_1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type ffn_bias_1(ffn_bias_1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type ffn_weight_2(ffn_weight_2SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type ffn_bias_2(ffn_bias_2SEXP); - Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type mask(maskSEXP); - Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type incr_key(incr_keySEXP); - Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type incr_value(incr_valueSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__transformer_decoder_only_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value)); - return rcpp_result_gen; -END_RCPP -} -// cpp_torch_namespace__native_decoder_only_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor -Rcpp::List cpp_torch_namespace__native_decoder_only_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor(XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchint64_t embed_dim, XPtrTorchint64_t num_head, XPtrTorchTensor qkv_weight, XPtrTorchTensor qkv_bias, XPtrTorchTensor proj_weight, XPtrTorchTensor proj_bias, XPtrTorchOptionalTensor mask, XPtrTorchOptionalTensor incr_key, XPtrTorchOptionalTensor incr_value, XPtrTorchbool need_weights, XPtrTorchbool average_attn_weights); -RcppExport SEXP _torch_cpp_torch_namespace__native_decoder_only_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor(SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP embed_dimSEXP, SEXP num_headSEXP, SEXP qkv_weightSEXP, SEXP qkv_biasSEXP, SEXP proj_weightSEXP, SEXP proj_biasSEXP, SEXP maskSEXP, SEXP incr_keySEXP, SEXP incr_valueSEXP, SEXP need_weightsSEXP, SEXP average_attn_weightsSEXP) { -BEGIN_RCPP - Rcpp::RObject rcpp_result_gen; - Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type 
key(keySEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type value(valueSEXP); - Rcpp::traits::input_parameter< XPtrTorchint64_t >::type embed_dim(embed_dimSEXP); - Rcpp::traits::input_parameter< XPtrTorchint64_t >::type num_head(num_headSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type qkv_weight(qkv_weightSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type qkv_bias(qkv_biasSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type proj_weight(proj_weightSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type proj_bias(proj_biasSEXP); - Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type mask(maskSEXP); - Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type incr_key(incr_keySEXP); - Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type incr_value(incr_valueSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type need_weights(need_weightsSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type average_attn_weights(average_attn_weightsSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__native_decoder_only_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights)); - return rcpp_result_gen; -END_RCPP -} // cpp_torch_namespace_special_bessel_j0_self_Tensor XPtrTorchTensor cpp_torch_namespace_special_bessel_j0_self_Tensor(XPtrTorchTensor self); RcppExport SEXP _torch_cpp_torch_namespace_special_bessel_j0_self_Tensor(SEXP selfSEXP) { @@ -37361,6 +38913,29 @@ BEGIN_RCPP return R_NilValue; END_RCPP } +// cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool +void cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchTensor lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf); +RcppExport SEXP _torch_cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(SEXP selfSEXP, SEXP gradsSEXP, SEXP exp_avgsSEXP, SEXP exp_avg_sqsSEXP, SEXP max_exp_avg_sqsSEXP, SEXP state_stepsSEXP, SEXP lrSEXP, SEXP beta1SEXP, SEXP beta2SEXP, SEXP weight_decaySEXP, SEXP epsSEXP, SEXP amsgradSEXP, SEXP maximizeSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exp_avgs(exp_avgsSEXP); + Rcpp::traits::input_parameter< 
XPtrTorchTensorList >::type exp_avg_sqs(exp_avg_sqsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type max_exp_avg_sqs(max_exp_avg_sqsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type state_steps(state_stepsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type lr(lrSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type beta1(beta1SEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type beta2(beta2SEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type amsgrad(amsgradSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP); + cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + return R_NilValue; +END_RCPP +} // cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool void cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchdouble lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf); RcppExport SEXP _torch_cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(SEXP selfSEXP, SEXP gradsSEXP, SEXP exp_avgsSEXP, SEXP exp_avg_sqsSEXP, SEXP max_exp_avg_sqsSEXP, SEXP state_stepsSEXP, SEXP lrSEXP, SEXP beta1SEXP, SEXP beta2SEXP, SEXP weight_decaySEXP, SEXP epsSEXP, SEXP amsgradSEXP, SEXP maximizeSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) { @@ -37384,6 +38959,98 @@ BEGIN_RCPP return R_NilValue; END_RCPP } +// cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool +void cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(XPtrTorchTensorList self, XPtrTorchTensorList grads, 
XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchTensor lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf); +RcppExport SEXP _torch_cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(SEXP selfSEXP, SEXP gradsSEXP, SEXP exp_avgsSEXP, SEXP exp_avg_sqsSEXP, SEXP max_exp_avg_sqsSEXP, SEXP state_stepsSEXP, SEXP lrSEXP, SEXP beta1SEXP, SEXP beta2SEXP, SEXP weight_decaySEXP, SEXP epsSEXP, SEXP amsgradSEXP, SEXP maximizeSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exp_avgs(exp_avgsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exp_avg_sqs(exp_avg_sqsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type max_exp_avg_sqs(max_exp_avg_sqsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type state_steps(state_stepsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type lr(lrSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type beta1(beta1SEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type beta2(beta2SEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type amsgrad(amsgradSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP); + cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool +void cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList momentum_buffer_list, XPtrTorchdouble weight_decay, XPtrTorchdouble momentum, XPtrTorchdouble lr, XPtrTorchdouble dampening, XPtrTorchbool nesterov, XPtrTorchbool maximize, XPtrTorchbool is_first_step, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf); +RcppExport SEXP 
_torch_cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(SEXP selfSEXP, SEXP gradsSEXP, SEXP momentum_buffer_listSEXP, SEXP weight_decaySEXP, SEXP momentumSEXP, SEXP lrSEXP, SEXP dampeningSEXP, SEXP nesterovSEXP, SEXP maximizeSEXP, SEXP is_first_stepSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type momentum_buffer_list(momentum_buffer_listSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type momentum(momentumSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type lr(lrSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type dampening(dampeningSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type nesterov(nesterovSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type is_first_step(is_first_stepSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP); + cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool +void cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList momentum_buffer_list, XPtrTorchdouble weight_decay, XPtrTorchdouble momentum, XPtrTorchTensor lr, XPtrTorchdouble dampening, XPtrTorchbool nesterov, XPtrTorchbool maximize, XPtrTorchbool is_first_step, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf); +RcppExport SEXP _torch_cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(SEXP selfSEXP, SEXP gradsSEXP, SEXP momentum_buffer_listSEXP, SEXP weight_decaySEXP, SEXP momentumSEXP, SEXP lrSEXP, SEXP dampeningSEXP, SEXP nesterovSEXP, SEXP maximizeSEXP, SEXP is_first_stepSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type momentum_buffer_list(momentum_buffer_listSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type momentum(momentumSEXP); + 
Rcpp::traits::input_parameter< XPtrTorchTensor >::type lr(lrSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type dampening(dampeningSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type nesterov(nesterovSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type is_first_step(is_first_stepSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP); + cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__fused_adagrad__self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool +void cpp_torch_namespace__fused_adagrad__self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool(XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList state_sums, XPtrTorchTensorList state_steps, XPtrTorchdouble lr, XPtrTorchdouble lr_decay, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf); +RcppExport SEXP _torch_cpp_torch_namespace__fused_adagrad__self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool(SEXP selfSEXP, SEXP gradsSEXP, SEXP state_sumsSEXP, SEXP state_stepsSEXP, SEXP lrSEXP, SEXP lr_decaySEXP, SEXP weight_decaySEXP, SEXP epsSEXP, SEXP maximizeSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type state_sums(state_sumsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type state_steps(state_stepsSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type lr(lrSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type lr_decay(lr_decaySEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP); + cpp_torch_namespace__fused_adagrad__self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__propagate_xla_data_input_Tensor_output_Tensor +void cpp_torch_namespace__propagate_xla_data_input_Tensor_output_Tensor(XPtrTorchTensor input, XPtrTorchTensor output); +RcppExport SEXP 
_torch_cpp_torch_namespace__propagate_xla_data_input_Tensor_output_Tensor(SEXP inputSEXP, SEXP outputSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type output(outputSEXP); + cpp_torch_namespace__propagate_xla_data_input_Tensor_output_Tensor(input, output); + return R_NilValue; +END_RCPP +} // cpp_torch_namespace__new_zeros_with_same_feature_meta_out_out_Tensor_self_Tensor_other_Tensor XPtrTorchTensor cpp_torch_namespace__new_zeros_with_same_feature_meta_out_out_Tensor_self_Tensor_other_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor other, XPtrTorchint64_t self_num_batch_dims); RcppExport SEXP _torch_cpp_torch_namespace__new_zeros_with_same_feature_meta_out_out_Tensor_self_Tensor_other_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP, SEXP self_num_batch_dimsSEXP) { @@ -37615,6 +39282,18 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// cpp_torch_namespace__test_functorch_fallback_out_out_Tensor_self_Tensor_other_Tensor +XPtrTorchTensor cpp_torch_namespace__test_functorch_fallback_out_out_Tensor_self_Tensor_other_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor other); +RcppExport SEXP _torch_cpp_torch_namespace__test_functorch_fallback_out_out_Tensor_self_Tensor_other_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type other(otherSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__test_functorch_fallback_out_out_Tensor_self_Tensor_other_Tensor(out, self, other)); + return rcpp_result_gen; +END_RCPP +} // cpp_torch_namespace_bartlett_window_out_out_Tensor_window_length_int64_t XPtrTorchTensor cpp_torch_namespace_bartlett_window_out_out_Tensor_window_length_int64_t(XPtrTorchTensor out, XPtrTorchint64_t window_length); RcppExport SEXP _torch_cpp_torch_namespace_bartlett_window_out_out_Tensor_window_length_int64_t(SEXP outSEXP, SEXP window_lengthSEXP) { @@ -38023,25 +39702,6 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool -XPtrTorchTensor cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor weight, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups, XPtrTorchbool benchmark, XPtrTorchbool deterministic, XPtrTorchbool allow_tf32); -RcppExport SEXP _torch_cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool(SEXP outSEXP, SEXP selfSEXP, SEXP weightSEXP, SEXP paddingSEXP, SEXP strideSEXP, SEXP dilationSEXP, SEXP groupsSEXP, SEXP benchmarkSEXP, SEXP deterministicSEXP, SEXP allow_tf32SEXP) { -BEGIN_RCPP - Rcpp::RObject rcpp_result_gen; - Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); - 
Rcpp::traits::input_parameter< XPtrTorchTensor >::type weight(weightSEXP); - Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type padding(paddingSEXP); - Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type stride(strideSEXP); - Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type dilation(dilationSEXP); - Rcpp::traits::input_parameter< XPtrTorchint64_t >::type groups(groupsSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type benchmark(benchmarkSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type deterministic(deterministicSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type allow_tf32(allow_tf32SEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool(out, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32)); - return rcpp_result_gen; -END_RCPP -} // cpp_torch_namespace_cudnn_convolution_transpose_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool XPtrTorchTensor cpp_torch_namespace_cudnn_convolution_transpose_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor weight, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef output_padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups, XPtrTorchbool benchmark, XPtrTorchbool deterministic, XPtrTorchbool allow_tf32); RcppExport SEXP _torch_cpp_torch_namespace_cudnn_convolution_transpose_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool(SEXP outSEXP, SEXP selfSEXP, SEXP weightSEXP, SEXP paddingSEXP, SEXP output_paddingSEXP, SEXP strideSEXP, SEXP dilationSEXP, SEXP groupsSEXP, SEXP benchmarkSEXP, SEXP deterministicSEXP, SEXP allow_tf32SEXP) { @@ -38419,6 +40079,18 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// cpp_torch_namespace_empty_permuted_out_out_Tensor_size_IntArrayRef_physical_layout_IntArrayRef +XPtrTorchTensor cpp_torch_namespace_empty_permuted_out_out_Tensor_size_IntArrayRef_physical_layout_IntArrayRef(XPtrTorchTensor out, XPtrTorchIntArrayRef size, XPtrTorchIntArrayRef physical_layout); +RcppExport SEXP _torch_cpp_torch_namespace_empty_permuted_out_out_Tensor_size_IntArrayRef_physical_layout_IntArrayRef(SEXP outSEXP, SEXP sizeSEXP, SEXP physical_layoutSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type size(sizeSEXP); + Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type physical_layout(physical_layoutSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_empty_permuted_out_out_Tensor_size_IntArrayRef_physical_layout_IntArrayRef(out, size, physical_layout)); + return rcpp_result_gen; +END_RCPP +} // cpp_torch_namespace_new_empty_out_out_Tensor_self_Tensor_size_IntArrayRef XPtrTorchTensor cpp_torch_namespace_new_empty_out_out_Tensor_self_Tensor_size_IntArrayRef(XPtrTorchTensor out, XPtrTorchTensor self, 
XPtrTorchIntArrayRef size); RcppExport SEXP _torch_cpp_torch_namespace_new_empty_out_out_Tensor_self_Tensor_size_IntArrayRef(SEXP outSEXP, SEXP selfSEXP, SEXP sizeSEXP) { @@ -38621,6 +40293,18 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// cpp_torch_namespace_floor_divide_out_out_Tensor_self_Tensor_other_Scalar +XPtrTorchTensor cpp_torch_namespace_floor_divide_out_out_Tensor_self_Tensor_other_Scalar(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchScalar other); +RcppExport SEXP _torch_cpp_torch_namespace_floor_divide_out_out_Tensor_self_Tensor_other_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type other(otherSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_floor_divide_out_out_Tensor_self_Tensor_other_Scalar(out, self, other)); + return rcpp_result_gen; +END_RCPP +} // cpp_torch_namespace_full_out_out_Tensor_size_IntArrayRef_fill_value_Scalar_names_DimnameList XPtrTorchTensor cpp_torch_namespace_full_out_out_Tensor_size_IntArrayRef_fill_value_Scalar_names_DimnameList(XPtrTorchTensor out, XPtrTorchIntArrayRef size, XPtrTorchScalar fill_value, XPtrTorchOptionalDimnameList names); RcppExport SEXP _torch_cpp_torch_namespace_full_out_out_Tensor_size_IntArrayRef_fill_value_Scalar_names_DimnameList(SEXP outSEXP, SEXP sizeSEXP, SEXP fill_valueSEXP, SEXP namesSEXP) { @@ -38892,9 +40576,9 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor -XPtrTorchTensor cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate); -RcppExport SEXP _torch_cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP) { +// cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor +XPtrTorchTensor cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate); +RcppExport SEXP _torch_cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); @@ -38902,13 +40586,13 @@ BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchOptionalIndexTensorList >::type indices(indicesSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP); Rcpp::traits::input_parameter< XPtrTorchbool >::type accumulate(accumulateSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(out, self, indices, values, accumulate)); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(out, self, indices, 
values, accumulate)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor -XPtrTorchTensor cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate, XPtrTorchbool unsafe); -RcppExport SEXP _torch_cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP, SEXP unsafeSEXP) { +// cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor +XPtrTorchTensor cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate, XPtrTorchbool unsafe); +RcppExport SEXP _torch_cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP, SEXP unsafeSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); @@ -38917,13 +40601,13 @@ BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP); Rcpp::traits::input_parameter< XPtrTorchbool >::type accumulate(accumulateSEXP); Rcpp::traits::input_parameter< XPtrTorchbool >::type unsafe(unsafeSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(out, self, indices, values, accumulate, unsafe)); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(out, self, indices, values, accumulate, unsafe)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor -XPtrTorchTensor cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate, XPtrTorchbool unsafe); -RcppExport SEXP _torch_cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP, SEXP unsafeSEXP) { +// cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor +XPtrTorchTensor cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate, XPtrTorchbool unsafe); +RcppExport SEXP _torch_cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(SEXP selfSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP accumulateSEXP, SEXP unsafeSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); @@ -38931,7 +40615,7 @@ BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP); 
Rcpp::traits::input_parameter< XPtrTorchbool >::type accumulate(accumulateSEXP); Rcpp::traits::input_parameter< XPtrTorchbool >::type unsafe(unsafeSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor(self, indices, values, accumulate, unsafe)); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor(self, indices, values, accumulate, unsafe)); return rcpp_result_gen; END_RCPP } @@ -39214,6 +40898,22 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// cpp_torch_namespace_quantized_max_pool3d_out_out_Tensor_self_Tensor_kernel_size_IntArrayRef +XPtrTorchTensor cpp_torch_namespace_quantized_max_pool3d_out_out_Tensor_self_Tensor_kernel_size_IntArrayRef(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef kernel_size, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef dilation, XPtrTorchbool ceil_mode); +RcppExport SEXP _torch_cpp_torch_namespace_quantized_max_pool3d_out_out_Tensor_self_Tensor_kernel_size_IntArrayRef(SEXP outSEXP, SEXP selfSEXP, SEXP kernel_sizeSEXP, SEXP strideSEXP, SEXP paddingSEXP, SEXP dilationSEXP, SEXP ceil_modeSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type kernel_size(kernel_sizeSEXP); + Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type stride(strideSEXP); + Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type padding(paddingSEXP); + Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type dilation(dilationSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type ceil_mode(ceil_modeSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_quantized_max_pool3d_out_out_Tensor_self_Tensor_kernel_size_IntArrayRef(out, self, kernel_size, stride, padding, dilation, ceil_mode)); + return rcpp_result_gen; +END_RCPP +} // cpp_torch_namespace_median_out_out_Tensor_self_Tensor XPtrTorchTensor cpp_torch_namespace_median_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self); RcppExport SEXP _torch_cpp_torch_namespace_median_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP) { @@ -39558,6 +41258,25 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// cpp_torch_namespace__native_batch_norm_legit_no_training_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double +Rcpp::List cpp_torch_namespace__native_batch_norm_legit_no_training_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(XPtrTorchTensor out0, XPtrTorchTensor out1, XPtrTorchTensor out2, XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchTensor running_mean, XPtrTorchTensor running_var, XPtrTorchdouble momentum, XPtrTorchdouble eps); +RcppExport SEXP _torch_cpp_torch_namespace__native_batch_norm_legit_no_training_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(SEXP out0SEXP, SEXP out1SEXP, SEXP out2SEXP, SEXP inputSEXP, SEXP weightSEXP, SEXP biasSEXP, SEXP running_meanSEXP, SEXP running_varSEXP, SEXP momentumSEXP, SEXP epsSEXP) { +BEGIN_RCPP + Rcpp::RObject 
rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out0(out0SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out1(out1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out2(out2SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type weight(weightSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type running_mean(running_meanSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type running_var(running_varSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type momentum(momentumSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__native_batch_norm_legit_no_training_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(out0, out1, out2, input, weight, bias, running_mean, running_var, momentum, eps)); + return rcpp_result_gen; +END_RCPP +} // cpp_torch_namespace_batch_norm_stats_out_out0_Tensor_out1_Tensor_input_Tensor_eps_double Rcpp::List cpp_torch_namespace_batch_norm_stats_out_out0_Tensor_out1_Tensor_input_Tensor_eps_double(XPtrTorchTensor out0, XPtrTorchTensor out1, XPtrTorchTensor input, XPtrTorchdouble eps); RcppExport SEXP _torch_cpp_torch_namespace_batch_norm_stats_out_out0_Tensor_out1_Tensor_input_Tensor_eps_double(SEXP out0SEXP, SEXP out1SEXP, SEXP inputSEXP, SEXP epsSEXP) { @@ -39652,9 +41371,9 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor -XPtrTorchTensor cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor(XPtrTorchTensor out, XPtrTorchTensor grad_out, XPtrTorchTensor input, XPtrTorchTensor mean, XPtrTorchTensor invstd, XPtrTorchOptionalTensor weight, XPtrTorchTensor mean_dy, XPtrTorchTensor mean_dy_xmu, XPtrTorchTensor count); -RcppExport SEXP _torch_cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor(SEXP outSEXP, SEXP grad_outSEXP, SEXP inputSEXP, SEXP meanSEXP, SEXP invstdSEXP, SEXP weightSEXP, SEXP mean_dySEXP, SEXP mean_dy_xmuSEXP, SEXP countSEXP) { +// cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor +XPtrTorchTensor cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor(XPtrTorchTensor out, XPtrTorchTensor grad_out, XPtrTorchTensor input, XPtrTorchTensor mean, XPtrTorchTensor invstd, XPtrTorchOptionalTensor weight, XPtrTorchTensor sum_dy, XPtrTorchTensor sum_dy_xmu, XPtrTorchTensor count); +RcppExport SEXP _torch_cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor(SEXP outSEXP, SEXP grad_outSEXP, SEXP inputSEXP, SEXP meanSEXP, SEXP invstdSEXP, SEXP weightSEXP, SEXP sum_dySEXP, 
SEXP sum_dy_xmuSEXP, SEXP countSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); @@ -39663,10 +41382,10 @@ BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensor >::type mean(meanSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type invstd(invstdSEXP); Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type weight(weightSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type mean_dy(mean_dySEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type mean_dy_xmu(mean_dy_xmuSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type sum_dy(sum_dySEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type sum_dy_xmu(sum_dy_xmuSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type count(countSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor(out, grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count)); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor(out, grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count)); return rcpp_result_gen; END_RCPP } @@ -40262,6 +41981,17 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// cpp_torch_namespace__nested_tensor_storage_offsets_out_out_Tensor_self_Tensor +XPtrTorchTensor cpp_torch_namespace__nested_tensor_storage_offsets_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self); +RcppExport SEXP _torch_cpp_torch_namespace__nested_tensor_storage_offsets_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_tensor_storage_offsets_out_out_Tensor_self_Tensor(out, self)); + return rcpp_result_gen; +END_RCPP +} // cpp_torch_namespace__nested_from_padded_and_nested_example_out_out_Tensor_padded_Tensor_nt_example_Tensor XPtrTorchTensor cpp_torch_namespace__nested_from_padded_and_nested_example_out_out_Tensor_padded_Tensor_nt_example_Tensor(XPtrTorchTensor out, XPtrTorchTensor padded, XPtrTorchTensor nt_example); RcppExport SEXP _torch_cpp_torch_namespace__nested_from_padded_and_nested_example_out_out_Tensor_padded_Tensor_nt_example_Tensor(SEXP outSEXP, SEXP paddedSEXP, SEXP nt_exampleSEXP) { @@ -40274,17 +42004,45 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef -XPtrTorchTensor cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor nested_size, XPtrTorchTensor nested_strides, XPtrTorchIntArrayRef offsets); -RcppExport SEXP _torch_cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef(SEXP outSEXP, SEXP selfSEXP, SEXP nested_sizeSEXP, SEXP nested_stridesSEXP, SEXP offsetsSEXP) { +// 
cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor +XPtrTorchTensor cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor nested_size, XPtrTorchTensor nested_strides, XPtrTorchTensor offsets); +RcppExport SEXP _torch_cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP nested_sizeSEXP, SEXP nested_stridesSEXP, SEXP offsetsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type nested_size(nested_sizeSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type nested_strides(nested_stridesSEXP); - Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type offsets(offsetsSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef(out, self, nested_size, nested_strides, offsets)); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type offsets(offsetsSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor(out, self, nested_size, nested_strides, offsets)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__nested_view_from_jagged_copy_out_out_Tensor_self_Tensor_offsets_Tensor_dummy_Tensor +XPtrTorchTensor cpp_torch_namespace__nested_view_from_jagged_copy_out_out_Tensor_self_Tensor_offsets_Tensor_dummy_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor offsets, XPtrTorchTensor dummy, XPtrTorchOptionalTensor lengths, XPtrTorchint64_t ragged_idx, XPtrTorchOptionalTensor min_seqlen, XPtrTorchOptionalTensor max_seqlen); +RcppExport SEXP _torch_cpp_torch_namespace__nested_view_from_jagged_copy_out_out_Tensor_self_Tensor_offsets_Tensor_dummy_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP offsetsSEXP, SEXP dummySEXP, SEXP lengthsSEXP, SEXP ragged_idxSEXP, SEXP min_seqlenSEXP, SEXP max_seqlenSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type offsets(offsetsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type dummy(dummySEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type lengths(lengthsSEXP); + Rcpp::traits::input_parameter< XPtrTorchint64_t >::type ragged_idx(ragged_idxSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type min_seqlen(min_seqlenSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type max_seqlen(max_seqlenSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_view_from_jagged_copy_out_out_Tensor_self_Tensor_offsets_Tensor_dummy_Tensor(out, self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__nested_get_values_copy_out_out_Tensor_self_Tensor +XPtrTorchTensor cpp_torch_namespace__nested_get_values_copy_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self); 
+RcppExport SEXP _torch_cpp_torch_namespace__nested_get_values_copy_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__nested_get_values_copy_out_out_Tensor_self_Tensor(out, self)); return rcpp_result_gen; END_RCPP } @@ -40563,6 +42321,42 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// cpp_torch_namespace__batch_norm_with_update_functional_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double +Rcpp::List cpp_torch_namespace__batch_norm_with_update_functional_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchTensor running_mean, XPtrTorchTensor running_var, XPtrTorchdouble momentum, XPtrTorchdouble eps); +RcppExport SEXP _torch_cpp_torch_namespace__batch_norm_with_update_functional_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(SEXP inputSEXP, SEXP weightSEXP, SEXP biasSEXP, SEXP running_meanSEXP, SEXP running_varSEXP, SEXP momentumSEXP, SEXP epsSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type weight(weightSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type running_mean(running_meanSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type running_var(running_varSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type momentum(momentumSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__batch_norm_with_update_functional_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(input, weight, bias, running_mean, running_var, momentum, eps)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__batch_norm_no_update_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double +Rcpp::List cpp_torch_namespace__batch_norm_no_update_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(XPtrTorchTensor out0, XPtrTorchTensor out1, XPtrTorchTensor out2, XPtrTorchTensor out3, XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchOptionalTensor running_mean, XPtrTorchOptionalTensor running_var, XPtrTorchdouble momentum, XPtrTorchdouble eps); +RcppExport SEXP _torch_cpp_torch_namespace__batch_norm_no_update_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(SEXP out0SEXP, SEXP out1SEXP, SEXP out2SEXP, SEXP out3SEXP, SEXP inputSEXP, SEXP weightSEXP, SEXP biasSEXP, SEXP running_meanSEXP, SEXP running_varSEXP, SEXP momentumSEXP, SEXP epsSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out0(out0SEXP); + 
Rcpp::traits::input_parameter< XPtrTorchTensor >::type out1(out1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out2(out2SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out3(out3SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type input(inputSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type weight(weightSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type bias(biasSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type running_mean(running_meanSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type running_var(running_varSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type momentum(momentumSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__batch_norm_no_update_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double(out0, out1, out2, out3, input, weight, bias, running_mean, running_var, momentum, eps)); + return rcpp_result_gen; +END_RCPP +} // cpp_torch_namespace__sparse_sum_out_out_Tensor_self_Tensor_dim_IntArrayRef XPtrTorchTensor cpp_torch_namespace__sparse_sum_out_out_Tensor_self_Tensor_dim_IntArrayRef(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIndexIntArrayRef dim); RcppExport SEXP _torch_cpp_torch_namespace__sparse_sum_out_out_Tensor_self_Tensor_dim_IntArrayRef(SEXP outSEXP, SEXP selfSEXP, SEXP dimSEXP) { @@ -40869,8 +42663,8 @@ BEGIN_RCPP END_RCPP } // cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor -XPtrTorchTensor cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor(XPtrTorchTensor out, XPtrTorchint64_t sparse_dim, XPtrTorchint64_t dense_dim, XPtrTorchIntArrayRef size, XPtrTorchIndexTensor indices, XPtrTorchTensor values); -RcppExport SEXP _torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor(SEXP outSEXP, SEXP sparse_dimSEXP, SEXP dense_dimSEXP, SEXP sizeSEXP, SEXP indicesSEXP, SEXP valuesSEXP) { +XPtrTorchTensor cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor(XPtrTorchTensor out, XPtrTorchint64_t sparse_dim, XPtrTorchint64_t dense_dim, XPtrTorchIntArrayRef size, XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchoptional_bool is_coalesced); +RcppExport SEXP _torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor(SEXP outSEXP, SEXP sparse_dimSEXP, SEXP dense_dimSEXP, SEXP sizeSEXP, SEXP indicesSEXP, SEXP valuesSEXP, SEXP is_coalescedSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); @@ -40879,7 +42673,8 @@ BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type size(sizeSEXP); Rcpp::traits::input_parameter< XPtrTorchIndexTensor >::type indices(indicesSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type values(valuesSEXP); - rcpp_result_gen = 
Rcpp::wrap(cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor(out, sparse_dim, dense_dim, size, indices, values)); + Rcpp::traits::input_parameter< XPtrTorchoptional_bool >::type is_coalesced(is_coalescedSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor(out, sparse_dim, dense_dim, size, indices, values, is_coalesced)); return rcpp_result_gen; END_RCPP } @@ -40949,15 +42744,29 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// cpp_torch_namespace__sparse_mask_projection_out_out_Tensor_self_Tensor_mask_Tensor +XPtrTorchTensor cpp_torch_namespace__sparse_mask_projection_out_out_Tensor_self_Tensor_mask_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor mask, XPtrTorchbool accumulate_matches); +RcppExport SEXP _torch_cpp_torch_namespace__sparse_mask_projection_out_out_Tensor_self_Tensor_mask_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP maskSEXP, SEXP accumulate_matchesSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type mask(maskSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type accumulate_matches(accumulate_matchesSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__sparse_mask_projection_out_out_Tensor_self_Tensor_mask_Tensor(out, self, mask, accumulate_matches)); + return rcpp_result_gen; +END_RCPP +} // cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor -XPtrTorchTensor cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype); -RcppExport SEXP _torch_cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP dtypeSEXP) { +XPtrTorchTensor cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype, XPtrTorchoptional_bool masked_grad); +RcppExport SEXP _torch_cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP dtypeSEXP, SEXP masked_gradSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchoptional_scalar_type >::type dtype(dtypeSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor(out, self, dtype)); + Rcpp::traits::input_parameter< XPtrTorchoptional_bool >::type masked_grad(masked_gradSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor(out, self, dtype, masked_grad)); return rcpp_result_gen; END_RCPP } @@ -41020,21 +42829,21 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t -XPtrTorchTensor cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchint64_t sparse_dim); -RcppExport SEXP _torch_cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t(SEXP outSEXP, SEXP selfSEXP, SEXP sparse_dimSEXP) { +// 
cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t +XPtrTorchTensor cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchint64_t sparse_dim); +RcppExport SEXP _torch_cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t(SEXP outSEXP, SEXP selfSEXP, SEXP sparse_dimSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchint64_t >::type sparse_dim(sparse_dimSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t(out, self, sparse_dim)); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t(out, self, sparse_dim)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor -XPtrTorchTensor cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchLayout layout, XPtrTorchOptionalIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim); -RcppExport SEXP _torch_cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP layoutSEXP, SEXP blocksizeSEXP, SEXP dense_dimSEXP) { +// cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor +XPtrTorchTensor cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchLayout layout, XPtrTorchOptionalIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim); +RcppExport SEXP _torch_cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP layoutSEXP, SEXP blocksizeSEXP, SEXP dense_dimSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); @@ -41042,57 +42851,57 @@ BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchLayout >::type layout(layoutSEXP); Rcpp::traits::input_parameter< XPtrTorchOptionalIntArrayRef >::type blocksize(blocksizeSEXP); Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type dense_dim(dense_dimSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor(out, self, layout, blocksize, dense_dim)); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor(out, self, layout, blocksize, dense_dim)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace_to_sparse_csr_out_out_Tensor_self_Tensor -XPtrTorchTensor cpp_torch_namespace_to_sparse_csr_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim); -RcppExport SEXP _torch_cpp_torch_namespace_to_sparse_csr_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP dense_dimSEXP) { +// cpp_torch_namespace__to_sparse_csr_out_out_Tensor_self_Tensor +XPtrTorchTensor cpp_torch_namespace__to_sparse_csr_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim); +RcppExport SEXP _torch_cpp_torch_namespace__to_sparse_csr_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP dense_dimSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type 
dense_dim(dense_dimSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_to_sparse_csr_out_out_Tensor_self_Tensor(out, self, dense_dim)); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__to_sparse_csr_out_out_Tensor_self_Tensor(out, self, dense_dim)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace_to_sparse_csc_out_out_Tensor_self_Tensor -XPtrTorchTensor cpp_torch_namespace_to_sparse_csc_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim); -RcppExport SEXP _torch_cpp_torch_namespace_to_sparse_csc_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP dense_dimSEXP) { +// cpp_torch_namespace__to_sparse_csc_out_out_Tensor_self_Tensor +XPtrTorchTensor cpp_torch_namespace__to_sparse_csc_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim); +RcppExport SEXP _torch_cpp_torch_namespace__to_sparse_csc_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP dense_dimSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type dense_dim(dense_dimSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_to_sparse_csc_out_out_Tensor_self_Tensor(out, self, dense_dim)); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__to_sparse_csc_out_out_Tensor_self_Tensor(out, self, dense_dim)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace_to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef -XPtrTorchTensor cpp_torch_namespace_to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim); -RcppExport SEXP _torch_cpp_torch_namespace_to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef(SEXP outSEXP, SEXP selfSEXP, SEXP blocksizeSEXP, SEXP dense_dimSEXP) { +// cpp_torch_namespace__to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef +XPtrTorchTensor cpp_torch_namespace__to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim); +RcppExport SEXP _torch_cpp_torch_namespace__to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef(SEXP outSEXP, SEXP selfSEXP, SEXP blocksizeSEXP, SEXP dense_dimSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type blocksize(blocksizeSEXP); Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type dense_dim(dense_dimSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef(out, self, blocksize, dense_dim)); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef(out, self, blocksize, dense_dim)); return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace_to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef -XPtrTorchTensor cpp_torch_namespace_to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim); -RcppExport SEXP 
_torch_cpp_torch_namespace_to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef(SEXP outSEXP, SEXP selfSEXP, SEXP blocksizeSEXP, SEXP dense_dimSEXP) { +// cpp_torch_namespace__to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef +XPtrTorchTensor cpp_torch_namespace__to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim); +RcppExport SEXP _torch_cpp_torch_namespace__to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef(SEXP outSEXP, SEXP selfSEXP, SEXP blocksizeSEXP, SEXP dense_dimSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type blocksize(blocksizeSEXP); Rcpp::traits::input_parameter< XPtrTorchoptional_int64_t >::type dense_dim(dense_dimSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef(out, self, blocksize, dense_dim)); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef(out, self, blocksize, dense_dim)); return rcpp_result_gen; END_RCPP } @@ -41125,8 +42934,8 @@ BEGIN_RCPP END_RCPP } // cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor -XPtrTorchTensor cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups); -RcppExport SEXP _torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP paddingSEXP, SEXP strideSEXP, SEXP dilationSEXP, SEXP groupsSEXP) { +XPtrTorchTensor cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups, XPtrTorchOptionalIntArrayRef input_size); +RcppExport SEXP _torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP paddingSEXP, SEXP strideSEXP, SEXP dilationSEXP, SEXP groupsSEXP, SEXP input_sizeSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); @@ -41135,7 +42944,8 @@ BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type stride(strideSEXP); Rcpp::traits::input_parameter< XPtrTorchIntArrayRef >::type dilation(dilationSEXP); Rcpp::traits::input_parameter< XPtrTorchint64_t >::type groups(groupsSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor(out, self, padding, stride, dilation, groups)); + Rcpp::traits::input_parameter< XPtrTorchOptionalIntArrayRef >::type input_size(input_sizeSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor(out, self, padding, stride, dilation, groups, input_size)); return rcpp_result_gen; END_RCPP } @@ -41456,13 +43266,13 @@ BEGIN_RCPP END_RCPP } // 
cpp_torch_namespace_lstm_mps_backward_out_out0_Tensor_out1_TensorList_out2_TensorList_grad_y_Tensor_grad_hy_Tensor_grad_cy_Tensor_z_state_Tensor_cell_state_fwd_Tensor_input_Tensor_layersOutputs_Tensor_hx_TensorList_params_TensorList_has_biases_bool_num_layers_int64_t_dropout_double_train_bool_bidirectional_bool_batch_first_bool -void cpp_torch_namespace_lstm_mps_backward_out_out0_Tensor_out1_TensorList_out2_TensorList_grad_y_Tensor_grad_hy_Tensor_grad_cy_Tensor_z_state_Tensor_cell_state_fwd_Tensor_input_Tensor_layersOutputs_Tensor_hx_TensorList_params_TensorList_has_biases_bool_num_layers_int64_t_dropout_double_train_bool_bidirectional_bool_batch_first_bool(XPtrTorchTensor out0, XPtrTorchTensorList out1, XPtrTorchTensorList out2, XPtrTorchTensor grad_y, XPtrTorchOptionalTensor grad_hy, XPtrTorchOptionalTensor grad_cy, XPtrTorchTensor z_state, XPtrTorchTensor cell_state_fwd, XPtrTorchTensor input, XPtrTorchTensor layersOutputs, XPtrTorchTensorList hx, XPtrTorchTensorList params, XPtrTorchbool has_biases, XPtrTorchint64_t num_layers, XPtrTorchdouble dropout, XPtrTorchbool train, XPtrTorchbool bidirectional, XPtrTorchbool batch_first); +void cpp_torch_namespace_lstm_mps_backward_out_out0_Tensor_out1_TensorList_out2_TensorList_grad_y_Tensor_grad_hy_Tensor_grad_cy_Tensor_z_state_Tensor_cell_state_fwd_Tensor_input_Tensor_layersOutputs_Tensor_hx_TensorList_params_TensorList_has_biases_bool_num_layers_int64_t_dropout_double_train_bool_bidirectional_bool_batch_first_bool(XPtrTorchTensor out0, XPtrTorchTensorList out1, XPtrTorchTensorList out2, XPtrTorchOptionalTensor grad_y, XPtrTorchOptionalTensor grad_hy, XPtrTorchOptionalTensor grad_cy, XPtrTorchTensor z_state, XPtrTorchTensor cell_state_fwd, XPtrTorchTensor input, XPtrTorchTensor layersOutputs, XPtrTorchTensorList hx, XPtrTorchTensorList params, XPtrTorchbool has_biases, XPtrTorchint64_t num_layers, XPtrTorchdouble dropout, XPtrTorchbool train, XPtrTorchbool bidirectional, XPtrTorchbool batch_first); RcppExport SEXP _torch_cpp_torch_namespace_lstm_mps_backward_out_out0_Tensor_out1_TensorList_out2_TensorList_grad_y_Tensor_grad_hy_Tensor_grad_cy_Tensor_z_state_Tensor_cell_state_fwd_Tensor_input_Tensor_layersOutputs_Tensor_hx_TensorList_params_TensorList_has_biases_bool_num_layers_int64_t_dropout_double_train_bool_bidirectional_bool_batch_first_bool(SEXP out0SEXP, SEXP out1SEXP, SEXP out2SEXP, SEXP grad_ySEXP, SEXP grad_hySEXP, SEXP grad_cySEXP, SEXP z_stateSEXP, SEXP cell_state_fwdSEXP, SEXP inputSEXP, SEXP layersOutputsSEXP, SEXP hxSEXP, SEXP paramsSEXP, SEXP has_biasesSEXP, SEXP num_layersSEXP, SEXP dropoutSEXP, SEXP trainSEXP, SEXP bidirectionalSEXP, SEXP batch_firstSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensor >::type out0(out0SEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out1(out1SEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out2(out2SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type grad_y(grad_ySEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_y(grad_ySEXP); Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_hy(grad_hySEXP); Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_cy(grad_cySEXP); Rcpp::traits::input_parameter< XPtrTorchTensor >::type z_state(z_stateSEXP); @@ -42222,20 +44032,6 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool -XPtrTorchTensor cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool(XPtrTorchTensor 
out, XPtrTorchTensor self, XPtrTorchbool stable, XPtrTorchindex_int64_t dim, XPtrTorchbool descending); -RcppExport SEXP _torch_cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool(SEXP outSEXP, SEXP selfSEXP, SEXP stableSEXP, SEXP dimSEXP, SEXP descendingSEXP) { -BEGIN_RCPP - Rcpp::RObject rcpp_result_gen; - Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type stable(stableSEXP); - Rcpp::traits::input_parameter< XPtrTorchindex_int64_t >::type dim(dimSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type descending(descendingSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool(out, self, stable, dim, descending)); - return rcpp_result_gen; -END_RCPP -} // cpp_torch_namespace_unfold_backward_out_out_Tensor_grad_in_Tensor_input_sizes_IntArrayRef_dim_int64_t_size_int64_t_step_int64_t XPtrTorchTensor cpp_torch_namespace_unfold_backward_out_out_Tensor_grad_in_Tensor_input_sizes_IntArrayRef_dim_int64_t_size_int64_t_step_int64_t(XPtrTorchTensor out, XPtrTorchTensor grad_in, XPtrTorchIntArrayRef input_sizes, XPtrTorchindex_int64_t dim, XPtrTorchint64_t size, XPtrTorchint64_t step); RcppExport SEXP _torch_cpp_torch_namespace_unfold_backward_out_out_Tensor_grad_in_Tensor_input_sizes_IntArrayRef_dim_int64_t_size_int64_t_step_int64_t(SEXP outSEXP, SEXP grad_inSEXP, SEXP input_sizesSEXP, SEXP dimSEXP, SEXP sizeSEXP, SEXP stepSEXP) { @@ -42331,115 +44127,127 @@ BEGIN_RCPP return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar -void cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarSEXP) { +// cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList +void cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP, SEXP alphaSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); - cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar(out, self, scalar); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type alpha(alphaSEXP); + cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList(out, self, other, alpha); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar -void cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarSEXP) { +// 
cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar +void cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarsSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); - cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar(out, self, scalar); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(out, self, scalars); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar -void cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarSEXP) { +// cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_Tensor +void cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_Tensor(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensor other, XPtrTorchScalar alpha); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP, SEXP alphaSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); - cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar(out, self, scalar); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type other(otherSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type alpha(alphaSEXP); + cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_Tensor(out, self, other, alpha); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar -void cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarSEXP) { +// cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar +void cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); - 
cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar(out, self, scalar); + cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar(out, self, scalar); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar -void cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarSEXP) { +// cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList +void cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP, SEXP alphaSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); - cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar(out, self, scalar); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type alpha(alphaSEXP); + cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList(out, self, other, alpha); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar -void cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarSEXP) { +// cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar +void cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarsSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); - cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar(out, self, scalar); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(out, self, scalars); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar -void cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar); -RcppExport SEXP 
_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarSEXP) { +// cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar +void cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); - cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar(out, self, scalar); + cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar(out, self, scalar); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList -void cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP, SEXP alphaSEXP) { +// cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList +void cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); - Rcpp::traits::input_parameter< XPtrTorchScalar >::type alpha(alphaSEXP); - cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList(out, self, other, alpha); + cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList(out, self, other); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList -void cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP, SEXP alphaSEXP) { +// cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar +void cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarsSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< 
XPtrTorchTensorList >::type other(otherSEXP); - Rcpp::traits::input_parameter< XPtrTorchScalar >::type alpha(alphaSEXP); - cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList(out, self, other, alpha); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(out, self, scalars); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList -void cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP) { +// cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_Tensor +void cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_Tensor(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensor other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); - cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList(out, self, other); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type other(otherSEXP); + cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_Tensor(out, self, other); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar +void cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar(out, self, scalar); return R_NilValue; END_RCPP } @@ -42454,113 +44262,113 @@ BEGIN_RCPP return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList -void cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP) { +// cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar +void cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarsSEXP) { 
BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); - cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList(out, self, other); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(out, self, scalars); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList -void cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP) { +// cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_Tensor +void cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_Tensor(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensor other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); - cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList(out, self, other); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type other(otherSEXP); + cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_Tensor(out, self, other); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList -void cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP) { +// cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar +void cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); - cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList(out, self, other); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar(out, self, scalar); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList -void 
cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP) { +// cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList +void cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); - cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList(out, self, other); + cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList(out, self, other); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar -void cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar +void cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarsSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(out, self, scalars); + cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(out, self, scalars); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar -void cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar +void cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarSEXP) { BEGIN_RCPP 
Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(out, self, scalars); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar(out, self, scalar); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar -void cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList +void cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(out, self, scalars); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); + cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList(out, self, other); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar -void cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar +void cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarsSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(out, self, scalars); + cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(out, self, scalars); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar -void 
cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar +void cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(out, self, scalars); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar(out, self, scalar); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar -void cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList +void cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(out, self, scalars); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); + cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList(out, self, other); return R_NilValue; END_RCPP } @@ -42575,6 +44383,28 @@ BEGIN_RCPP return R_NilValue; END_RCPP } +// cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar +void cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type scalar(scalarSEXP); + 
cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar(out, self, scalar); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList +void cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP otherSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type other(otherSEXP); + cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList(out, self, other); + return R_NilValue; +END_RCPP +} // cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar void cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars); RcppExport SEXP _torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP scalarsSEXP) { @@ -42586,43 +44416,81 @@ BEGIN_RCPP return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList -void cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +// cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList +void cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP valueSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList(out, self); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type value(valueSEXP); + cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList(out, self, tensor1, tensor2, value); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList -void cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +// cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar +void 
cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList(out, self); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(out, self, tensor1, tensor2, scalars); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_zero_self_TensorList -XPtrTorchTensorList cpp_torch_namespace__foreach_zero_self_TensorList(XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_zero_self_TensorList(SEXP selfSEXP) { +// cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor +void cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) { BEGIN_RCPP - Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_zero_self_TensorList(self)); - return rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type scalars(scalarsSEXP); + cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(out, self, tensor1, tensor2, scalars); + return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList -void cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +// cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList +void cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value); +RcppExport SEXP 
_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP valueSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList(out, self); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type value(valueSEXP); + cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList(out, self, tensor1, tensor2, value); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar +void cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); + cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(out, self, tensor1, tensor2, scalars); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor +void cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type scalars(scalarsSEXP); + cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(out, self, tensor1, tensor2, scalars); return R_NilValue; END_RCPP } @@ -42716,6 +44584,16 @@ BEGIN_RCPP return R_NilValue; END_RCPP } +// cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList +void 
cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList(out, self); + return R_NilValue; +END_RCPP +} // cpp_torch_namespace__foreach_expm1_out_out_TensorList_self_TensorList void cpp_torch_namespace__foreach_expm1_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); RcppExport SEXP _torch_cpp_torch_namespace__foreach_expm1_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { @@ -42736,6 +44614,50 @@ BEGIN_RCPP return R_NilValue; END_RCPP } +// cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList(out, self); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList +void cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchTensorList weights); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP tensors1SEXP, SEXP weightsSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensors1(tensors1SEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type weights(weightsSEXP); + cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList(out, self, tensors1, weights); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar +void cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchScalar weight); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP tensors1SEXP, SEXP weightSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensors1(tensors1SEXP); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type weight(weightSEXP); + cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar(out, self, tensors1, weight); + return 
R_NilValue; +END_RCPP +} +// cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList(out, self); + return R_NilValue; +END_RCPP +} // cpp_torch_namespace__foreach_log_out_out_TensorList_self_TensorList void cpp_torch_namespace__foreach_log_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); RcppExport SEXP _torch_cpp_torch_namespace__foreach_log_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { @@ -42776,93 +44698,88 @@ BEGIN_RCPP return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList -void cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +// cpp_torch_namespace__foreach_max_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_max_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_max_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList(out, self); + cpp_torch_namespace__foreach_max_out_out_TensorList_self_TensorList(out, self); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList -void cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +// cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList(out, self); + cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList(out, self); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList -void cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +// cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar 
ord, XPtrTorchoptional_scalar_type dtype); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP ordSEXP, SEXP dtypeSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList(out, self); + Rcpp::traits::input_parameter< XPtrTorchScalar >::type ord(ordSEXP); + Rcpp::traits::input_parameter< XPtrTorchoptional_scalar_type >::type dtype(dtypeSEXP); + cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList(out, self, ord, dtype); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList -void cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +// cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_TensorList +void cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList exponent); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP exponentSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList(out, self); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exponent(exponentSEXP); + cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_TensorList(out, self, exponent); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList -void cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +// cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_Scalar +void cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar exponent); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP exponentSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList(out, self); - return R_NilValue; -END_RCPP -} -// cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList -void cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { -BEGIN_RCPP - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList(out, self); + 
Rcpp::traits::input_parameter< XPtrTorchScalar >::type exponent(exponentSEXP); + cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_Scalar(out, self, exponent); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList -void cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +// cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_ArrayRefScalar +void cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar exponent); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP exponentSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList(out, self); + Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type exponent(exponentSEXP); + cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_ArrayRefScalar(out, self, exponent); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList -void cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +// cpp_torch_namespace__foreach_reciprocal_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_reciprocal_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_reciprocal_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList(out, self); + cpp_torch_namespace__foreach_reciprocal_out_out_TensorList_self_TensorList(out, self); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_reciprocal_out_out_TensorList_self_TensorList -void cpp_torch_namespace__foreach_reciprocal_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_reciprocal_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +// cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_reciprocal_out_out_TensorList_self_TensorList(out, self); + cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList(out, self); return R_NilValue; END_RCPP } @@ -42876,126 +44793,105 
@@ BEGIN_RCPP return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList -void cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { +// cpp_torch_namespace__foreach_sign_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_sign_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_sign_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList(out, self); + cpp_torch_namespace__foreach_sign_out_out_TensorList_self_TensorList(out, self); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList -void cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP valueSEXP) { +// cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); - Rcpp::traits::input_parameter< XPtrTorchScalar >::type value(valueSEXP); - cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList(out, self, tensor1, tensor2, value); + cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList(out, self); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList -void cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP valueSEXP) { +// cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { BEGIN_RCPP 
Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); - Rcpp::traits::input_parameter< XPtrTorchScalar >::type value(valueSEXP); - cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList(out, self, tensor1, tensor2, value); + cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList(out, self); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar -void cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); - Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(out, self, tensor1, tensor2, scalars); + cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList(out, self); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor -void cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); - 
Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type scalars(scalarsSEXP); - cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(out, self, tensor1, tensor2, scalars); + cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList(out, self); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar -void cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(SEXP outSEXP, SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); - Rcpp::traits::input_parameter< XPtrTorchvector_Scalar >::type scalars(scalarsSEXP); - cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar(out, self, tensor1, tensor2, scalars); + cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList(out, self); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor -void cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP tensor1SEXP, SEXP tensor2SEXP, SEXP scalarsSEXP) { +// cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor1(tensor1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensor2(tensor2SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type scalars(scalarsSEXP); - 
cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor(out, self, tensor1, tensor2, scalars); + cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList(out, self); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList -void cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar ord); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP ordSEXP) { +// cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList +void cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList(SEXP outSEXP, SEXP selfSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchScalar >::type ord(ordSEXP); - cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList(out, self, ord); + cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList(out, self); return R_NilValue; END_RCPP } -// cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList -void cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchTensorList weights); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP tensors1SEXP, SEXP weightsSEXP) { +// cpp_torch_namespace__foreach_zero_self_TensorList +XPtrTorchTensorList cpp_torch_namespace__foreach_zero_self_TensorList(XPtrTorchTensorList self); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_zero_self_TensorList(SEXP selfSEXP) { BEGIN_RCPP - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); + Rcpp::RObject rcpp_result_gen; Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensors1(tensors1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type weights(weightsSEXP); - cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList(out, self, tensors1, weights); - return R_NilValue; + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__foreach_zero_self_TensorList(self)); + return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar -void cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchScalar weight); -RcppExport SEXP _torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar(SEXP outSEXP, SEXP selfSEXP, SEXP tensors1SEXP, SEXP weightSEXP) { +// cpp_torch_namespace__foreach_copy_out_out_TensorList_self_TensorList_src_TensorList +void cpp_torch_namespace__foreach_copy_out_out_TensorList_self_TensorList_src_TensorList(XPtrTorchTensorList out, 
XPtrTorchTensorList self, XPtrTorchTensorList src, XPtrTorchbool non_blocking); +RcppExport SEXP _torch_cpp_torch_namespace__foreach_copy_out_out_TensorList_self_TensorList_src_TensorList(SEXP outSEXP, SEXP selfSEXP, SEXP srcSEXP, SEXP non_blockingSEXP) { BEGIN_RCPP Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensorList >::type tensors1(tensors1SEXP); - Rcpp::traits::input_parameter< XPtrTorchScalar >::type weight(weightSEXP); - cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar(out, self, tensors1, weight); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type src(srcSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type non_blocking(non_blockingSEXP); + cpp_torch_namespace__foreach_copy_out_out_TensorList_self_TensorList_src_TensorList(out, self, src, non_blocking); return R_NilValue; END_RCPP } @@ -43013,22 +44909,6 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar -XPtrTorchTensor cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar(XPtrTorchTensor out, XPtrTorchTensor sorted_sequence, XPtrTorchScalar self, XPtrTorchbool out_int32, XPtrTorchbool right, XPtrTorchoptional_string_view side, XPtrTorchOptionalTensor sorter); -RcppExport SEXP _torch_cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar(SEXP outSEXP, SEXP sorted_sequenceSEXP, SEXP selfSEXP, SEXP out_int32SEXP, SEXP rightSEXP, SEXP sideSEXP, SEXP sorterSEXP) { -BEGIN_RCPP - Rcpp::RObject rcpp_result_gen; - Rcpp::traits::input_parameter< XPtrTorchTensor >::type out(outSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type sorted_sequence(sorted_sequenceSEXP); - Rcpp::traits::input_parameter< XPtrTorchScalar >::type self(selfSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type out_int32(out_int32SEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type right(rightSEXP); - Rcpp::traits::input_parameter< XPtrTorchoptional_string_view >::type side(sideSEXP); - Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type sorter(sorterSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar(out, sorted_sequence, self, out_int32, right, side, sorter)); - return rcpp_result_gen; -END_RCPP -} // cpp_torch_namespace_glu_jvp_out_out_Tensor_glu_Tensor_x_Tensor_dx_Tensor_dim_int64_t XPtrTorchTensor cpp_torch_namespace_glu_jvp_out_out_Tensor_glu_Tensor_x_Tensor_dx_Tensor_dim_int64_t(XPtrTorchTensor out, XPtrTorchTensor glu, XPtrTorchTensor x, XPtrTorchTensor dx, XPtrTorchindex_int64_t dim); RcppExport SEXP _torch_cpp_torch_namespace_glu_jvp_out_out_Tensor_glu_Tensor_x_Tensor_dx_Tensor_dim_int64_t(SEXP outSEXP, SEXP gluSEXP, SEXP xSEXP, SEXP dxSEXP, SEXP dimSEXP) { @@ -43855,66 +45735,6 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } -// cpp_torch_namespace__transformer_decoder_only_layer_fwd_out_out0_Tensor_out1_Tensor_out2_Tensor_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor -Rcpp::List 
cpp_torch_namespace__transformer_decoder_only_layer_fwd_out_out0_Tensor_out1_Tensor_out2_Tensor_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor(XPtrTorchTensor out0, XPtrTorchTensor out1, XPtrTorchTensor out2, XPtrTorchTensor src, XPtrTorchint64_t embed_dim, XPtrTorchint64_t num_heads, XPtrTorchTensor qkv_weight, XPtrTorchTensor qkv_bias, XPtrTorchTensor proj_weight, XPtrTorchTensor proj_bias, XPtrTorchbool use_gelu, XPtrTorchbool norm_first, XPtrTorchdouble eps, XPtrTorchTensor norm_weight_1, XPtrTorchTensor norm_bias_1, XPtrTorchTensor norm_weight_2, XPtrTorchTensor norm_bias_2, XPtrTorchTensor ffn_weight_1, XPtrTorchTensor ffn_bias_1, XPtrTorchTensor ffn_weight_2, XPtrTorchTensor ffn_bias_2, XPtrTorchOptionalTensor mask, XPtrTorchOptionalTensor incr_key, XPtrTorchOptionalTensor incr_value); -RcppExport SEXP _torch_cpp_torch_namespace__transformer_decoder_only_layer_fwd_out_out0_Tensor_out1_Tensor_out2_Tensor_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor(SEXP out0SEXP, SEXP out1SEXP, SEXP out2SEXP, SEXP srcSEXP, SEXP embed_dimSEXP, SEXP num_headsSEXP, SEXP qkv_weightSEXP, SEXP qkv_biasSEXP, SEXP proj_weightSEXP, SEXP proj_biasSEXP, SEXP use_geluSEXP, SEXP norm_firstSEXP, SEXP epsSEXP, SEXP norm_weight_1SEXP, SEXP norm_bias_1SEXP, SEXP norm_weight_2SEXP, SEXP norm_bias_2SEXP, SEXP ffn_weight_1SEXP, SEXP ffn_bias_1SEXP, SEXP ffn_weight_2SEXP, SEXP ffn_bias_2SEXP, SEXP maskSEXP, SEXP incr_keySEXP, SEXP incr_valueSEXP) { -BEGIN_RCPP - Rcpp::RObject rcpp_result_gen; - Rcpp::traits::input_parameter< XPtrTorchTensor >::type out0(out0SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type out1(out1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type out2(out2SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type src(srcSEXP); - Rcpp::traits::input_parameter< XPtrTorchint64_t >::type embed_dim(embed_dimSEXP); - Rcpp::traits::input_parameter< XPtrTorchint64_t >::type num_heads(num_headsSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type qkv_weight(qkv_weightSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type qkv_bias(qkv_biasSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type proj_weight(proj_weightSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type proj_bias(proj_biasSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type use_gelu(use_geluSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type norm_first(norm_firstSEXP); - Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type norm_weight_1(norm_weight_1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type norm_bias_1(norm_bias_1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type norm_weight_2(norm_weight_2SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type norm_bias_2(norm_bias_2SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type ffn_weight_1(ffn_weight_1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type 
ffn_bias_1(ffn_bias_1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type ffn_weight_2(ffn_weight_2SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type ffn_bias_2(ffn_bias_2SEXP); - Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type mask(maskSEXP); - Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type incr_key(incr_keySEXP); - Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type incr_value(incr_valueSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__transformer_decoder_only_layer_fwd_out_out0_Tensor_out1_Tensor_out2_Tensor_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor(out0, out1, out2, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value)); - return rcpp_result_gen; -END_RCPP -} -// cpp_torch_namespace__native_decoder_only_multi_head_attention_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor -Rcpp::List cpp_torch_namespace__native_decoder_only_multi_head_attention_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor(XPtrTorchTensor out0, XPtrTorchTensor out1, XPtrTorchTensor out2, XPtrTorchTensor out3, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchint64_t embed_dim, XPtrTorchint64_t num_head, XPtrTorchTensor qkv_weight, XPtrTorchTensor qkv_bias, XPtrTorchTensor proj_weight, XPtrTorchTensor proj_bias, XPtrTorchOptionalTensor mask, XPtrTorchOptionalTensor incr_key, XPtrTorchOptionalTensor incr_value, XPtrTorchbool need_weights, XPtrTorchbool average_attn_weights); -RcppExport SEXP _torch_cpp_torch_namespace__native_decoder_only_multi_head_attention_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor(SEXP out0SEXP, SEXP out1SEXP, SEXP out2SEXP, SEXP out3SEXP, SEXP querySEXP, SEXP keySEXP, SEXP valueSEXP, SEXP embed_dimSEXP, SEXP num_headSEXP, SEXP qkv_weightSEXP, SEXP qkv_biasSEXP, SEXP proj_weightSEXP, SEXP proj_biasSEXP, SEXP maskSEXP, SEXP incr_keySEXP, SEXP incr_valueSEXP, SEXP need_weightsSEXP, SEXP average_attn_weightsSEXP) { -BEGIN_RCPP - Rcpp::RObject rcpp_result_gen; - Rcpp::traits::input_parameter< XPtrTorchTensor >::type out0(out0SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type out1(out1SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type out2(out2SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type out3(out3SEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type query(querySEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type key(keySEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type value(valueSEXP); - Rcpp::traits::input_parameter< XPtrTorchint64_t >::type embed_dim(embed_dimSEXP); - Rcpp::traits::input_parameter< XPtrTorchint64_t >::type 
num_head(num_headSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type qkv_weight(qkv_weightSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type qkv_bias(qkv_biasSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type proj_weight(proj_weightSEXP); - Rcpp::traits::input_parameter< XPtrTorchTensor >::type proj_bias(proj_biasSEXP); - Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type mask(maskSEXP); - Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type incr_key(incr_keySEXP); - Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type incr_value(incr_valueSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type need_weights(need_weightsSEXP); - Rcpp::traits::input_parameter< XPtrTorchbool >::type average_attn_weights(average_attn_weightsSEXP); - rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__native_decoder_only_multi_head_attention_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor(out0, out1, out2, out3, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights)); - return rcpp_result_gen; -END_RCPP -} // cpp_torch_namespace__foobar_out_out_Tensor_self_Tensor XPtrTorchTensor cpp_torch_namespace__foobar_out_out_Tensor_self_Tensor(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchbool arg1, XPtrTorchbool arg2, XPtrTorchbool arg3); RcppExport SEXP _torch_cpp_torch_namespace__foobar_out_out_Tensor_self_Tensor(SEXP outSEXP, SEXP selfSEXP, SEXP arg1SEXP, SEXP arg2SEXP, SEXP arg3SEXP) { @@ -43977,6 +45797,54 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// cpp_torch_namespace__fused_adam_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool +void cpp_torch_namespace__fused_adam_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchTensor lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf); +RcppExport SEXP _torch_cpp_torch_namespace__fused_adam_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(SEXP outSEXP, SEXP selfSEXP, SEXP gradsSEXP, SEXP exp_avgsSEXP, SEXP exp_avg_sqsSEXP, SEXP max_exp_avg_sqsSEXP, SEXP state_stepsSEXP, SEXP lrSEXP, SEXP beta1SEXP, SEXP beta2SEXP, SEXP weight_decaySEXP, SEXP epsSEXP, SEXP amsgradSEXP, SEXP maximizeSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + 
Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exp_avgs(exp_avgsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exp_avg_sqs(exp_avg_sqsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type max_exp_avg_sqs(max_exp_avg_sqsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type state_steps(state_stepsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type lr(lrSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type beta1(beta1SEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type beta2(beta2SEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type amsgrad(amsgradSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP); + cpp_torch_namespace__fused_adam_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__fused_adam_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool +Rcpp::List cpp_torch_namespace__fused_adam_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchTensor lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf); +RcppExport SEXP _torch_cpp_torch_namespace__fused_adam_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(SEXP selfSEXP, SEXP gradsSEXP, SEXP exp_avgsSEXP, SEXP exp_avg_sqsSEXP, SEXP max_exp_avg_sqsSEXP, SEXP state_stepsSEXP, SEXP lrSEXP, SEXP beta1SEXP, SEXP beta2SEXP, SEXP weight_decaySEXP, SEXP epsSEXP, SEXP amsgradSEXP, SEXP maximizeSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exp_avgs(exp_avgsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exp_avg_sqs(exp_avg_sqsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList 
>::type max_exp_avg_sqs(max_exp_avg_sqsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type state_steps(state_stepsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type lr(lrSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type beta1(beta1SEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type beta2(beta2SEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type amsgrad(amsgradSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__fused_adam_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf)); + return rcpp_result_gen; +END_RCPP +} // cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool void cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchdouble lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf); RcppExport SEXP _torch_cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(SEXP outSEXP, SEXP selfSEXP, SEXP gradsSEXP, SEXP exp_avgsSEXP, SEXP exp_avg_sqsSEXP, SEXP max_exp_avg_sqsSEXP, SEXP state_stepsSEXP, SEXP lrSEXP, SEXP beta1SEXP, SEXP beta2SEXP, SEXP weight_decaySEXP, SEXP epsSEXP, SEXP amsgradSEXP, SEXP maximizeSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) { @@ -44025,6 +45893,178 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool +void 
cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchTensor lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf); +RcppExport SEXP _torch_cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(SEXP outSEXP, SEXP selfSEXP, SEXP gradsSEXP, SEXP exp_avgsSEXP, SEXP exp_avg_sqsSEXP, SEXP max_exp_avg_sqsSEXP, SEXP state_stepsSEXP, SEXP lrSEXP, SEXP beta1SEXP, SEXP beta2SEXP, SEXP weight_decaySEXP, SEXP epsSEXP, SEXP amsgradSEXP, SEXP maximizeSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exp_avgs(exp_avgsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exp_avg_sqs(exp_avg_sqsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type max_exp_avg_sqs(max_exp_avg_sqsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type state_steps(state_stepsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type lr(lrSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type beta1(beta1SEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type beta2(beta2SEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type amsgrad(amsgradSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP); + cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__fused_adamw_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool +Rcpp::List 
cpp_torch_namespace__fused_adamw_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchTensor lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf); +RcppExport SEXP _torch_cpp_torch_namespace__fused_adamw_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(SEXP selfSEXP, SEXP gradsSEXP, SEXP exp_avgsSEXP, SEXP exp_avg_sqsSEXP, SEXP max_exp_avg_sqsSEXP, SEXP state_stepsSEXP, SEXP lrSEXP, SEXP beta1SEXP, SEXP beta2SEXP, SEXP weight_decaySEXP, SEXP epsSEXP, SEXP amsgradSEXP, SEXP maximizeSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exp_avgs(exp_avgsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type exp_avg_sqs(exp_avg_sqsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type max_exp_avg_sqs(max_exp_avg_sqsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type state_steps(state_stepsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensor >::type lr(lrSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type beta1(beta1SEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type beta2(beta2SEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type amsgrad(amsgradSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP); + rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__fused_adamw_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf)); + return rcpp_result_gen; +END_RCPP +} +// cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool +void cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList 
momentum_buffer_list, XPtrTorchdouble weight_decay, XPtrTorchdouble momentum, XPtrTorchdouble lr, XPtrTorchdouble dampening, XPtrTorchbool nesterov, XPtrTorchbool maximize, XPtrTorchbool is_first_step, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf); +RcppExport SEXP _torch_cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(SEXP outSEXP, SEXP selfSEXP, SEXP gradsSEXP, SEXP momentum_buffer_listSEXP, SEXP weight_decaySEXP, SEXP momentumSEXP, SEXP lrSEXP, SEXP dampeningSEXP, SEXP nesterovSEXP, SEXP maximizeSEXP, SEXP is_first_stepSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) { +BEGIN_RCPP + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP); + Rcpp::traits::input_parameter< XPtrTorchTensorList >::type momentum_buffer_list(momentum_buffer_listSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type momentum(momentumSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type lr(lrSEXP); + Rcpp::traits::input_parameter< XPtrTorchdouble >::type dampening(dampeningSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type nesterov(nesterovSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP); + Rcpp::traits::input_parameter< XPtrTorchbool >::type is_first_step(is_first_stepSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP); + Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP); + cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(out, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + return R_NilValue; +END_RCPP +} +// cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool +Rcpp::List cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList momentum_buffer_list, XPtrTorchdouble weight_decay, XPtrTorchdouble momentum, XPtrTorchdouble lr, XPtrTorchdouble dampening, XPtrTorchbool nesterov, XPtrTorchbool maximize, XPtrTorchbool is_first_step, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf); +RcppExport SEXP _torch_cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(SEXP selfSEXP, SEXP gradsSEXP, SEXP momentum_buffer_listSEXP, SEXP weight_decaySEXP, SEXP momentumSEXP, SEXP lrSEXP, SEXP dampeningSEXP, SEXP nesterovSEXP, SEXP maximizeSEXP, SEXP is_first_stepSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) { +BEGIN_RCPP + 
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type momentum_buffer_list(momentum_buffer_listSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type momentum(momentumSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type lr(lrSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type dampening(dampeningSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type nesterov(nesterovSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type is_first_step(is_first_stepSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool
+void cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList momentum_buffer_list, XPtrTorchdouble weight_decay, XPtrTorchdouble momentum, XPtrTorchTensor lr, XPtrTorchdouble dampening, XPtrTorchbool nesterov, XPtrTorchbool maximize, XPtrTorchbool is_first_step, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf);
+RcppExport SEXP _torch_cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(SEXP outSEXP, SEXP selfSEXP, SEXP gradsSEXP, SEXP momentum_buffer_listSEXP, SEXP weight_decaySEXP, SEXP momentumSEXP, SEXP lrSEXP, SEXP dampeningSEXP, SEXP nesterovSEXP, SEXP maximizeSEXP, SEXP is_first_stepSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type momentum_buffer_list(momentum_buffer_listSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type momentum(momentumSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type lr(lrSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type dampening(dampeningSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type nesterov(nesterovSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type is_first_step(is_first_stepSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP);
+    cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(out, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
+    return R_NilValue;
+END_RCPP
+}
+// cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool
+Rcpp::List cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList momentum_buffer_list, XPtrTorchdouble weight_decay, XPtrTorchdouble momentum, XPtrTorchTensor lr, XPtrTorchdouble dampening, XPtrTorchbool nesterov, XPtrTorchbool maximize, XPtrTorchbool is_first_step, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf);
+RcppExport SEXP _torch_cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(SEXP selfSEXP, SEXP gradsSEXP, SEXP momentum_buffer_listSEXP, SEXP weight_decaySEXP, SEXP momentumSEXP, SEXP lrSEXP, SEXP dampeningSEXP, SEXP nesterovSEXP, SEXP maximizeSEXP, SEXP is_first_stepSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type momentum_buffer_list(momentum_buffer_listSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type momentum(momentumSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensor >::type lr(lrSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type dampening(dampeningSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type nesterov(nesterovSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type is_first_step(is_first_stepSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf));
+    return rcpp_result_gen;
+END_RCPP
+}
+// cpp_torch_namespace__fused_adagrad_out_out_TensorList_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool
+void cpp_torch_namespace__fused_adagrad_out_out_TensorList_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool(XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList state_sums, XPtrTorchTensorList state_steps, XPtrTorchdouble lr, XPtrTorchdouble lr_decay, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf);
+RcppExport SEXP _torch_cpp_torch_namespace__fused_adagrad_out_out_TensorList_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool(SEXP outSEXP, SEXP selfSEXP, SEXP gradsSEXP, SEXP state_sumsSEXP, SEXP state_stepsSEXP, SEXP lrSEXP, SEXP lr_decaySEXP, SEXP weight_decaySEXP, SEXP epsSEXP, SEXP maximizeSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) {
+BEGIN_RCPP
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type out(outSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type state_sums(state_sumsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type state_steps(state_stepsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type lr(lrSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type lr_decay(lr_decaySEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP);
+    cpp_torch_namespace__fused_adagrad_out_out_TensorList_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool(out, self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf);
+    return R_NilValue;
+END_RCPP
+}
+// cpp_torch_namespace__fused_adagrad_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool
+Rcpp::List cpp_torch_namespace__fused_adagrad_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool(XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList state_sums, XPtrTorchTensorList state_steps, XPtrTorchdouble lr, XPtrTorchdouble lr_decay, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf);
+RcppExport SEXP _torch_cpp_torch_namespace__fused_adagrad_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool(SEXP selfSEXP, SEXP gradsSEXP, SEXP state_sumsSEXP, SEXP state_stepsSEXP, SEXP lrSEXP, SEXP lr_decaySEXP, SEXP weight_decaySEXP, SEXP epsSEXP, SEXP maximizeSEXP, SEXP grad_scaleSEXP, SEXP found_infSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type self(selfSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type grads(gradsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type state_sums(state_sumsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchTensorList >::type state_steps(state_stepsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type lr(lrSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type lr_decay(lr_decaySEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type weight_decay(weight_decaySEXP);
+    Rcpp::traits::input_parameter< XPtrTorchdouble >::type eps(epsSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchbool >::type maximize(maximizeSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type grad_scale(grad_scaleSEXP);
+    Rcpp::traits::input_parameter< XPtrTorchOptionalTensor >::type found_inf(found_infSEXP);
+    rcpp_result_gen = Rcpp::wrap(cpp_torch_namespace__fused_adagrad_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf));
+    return rcpp_result_gen;
+END_RCPP
+}
 // cpp_torch_generator
 XPtrTorchGenerator cpp_torch_generator();
 RcppExport SEXP _torch_cpp_torch_generator() {
@@ -45752,9 +47792,11 @@ static const R_CallMethodDef CallEntries[] = {
     {"_torch_cpp_torch_method__is_all_true_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method__is_all_true_self_Tensor, 1},
     {"_torch_cpp_torch_method__is_any_true_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method__is_any_true_self_Tensor, 1},
     {"_torch_cpp_torch_method_all_self_Tensor_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_method_all_self_Tensor_dim_int64_t, 3},
+    {"_torch_cpp_torch_method_all_self_Tensor_dim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_method_all_self_Tensor_dim_IntArrayRef, 3},
     {"_torch_cpp_torch_method_all_self_Tensor_dim_Dimname", (DL_FUNC) &_torch_cpp_torch_method_all_self_Tensor_dim_Dimname, 3},
     {"_torch_cpp_torch_method_allclose_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_method_allclose_self_Tensor_other_Tensor, 5},
     {"_torch_cpp_torch_method_any_self_Tensor_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_method_any_self_Tensor_dim_int64_t, 3},
+    {"_torch_cpp_torch_method_any_self_Tensor_dim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_method_any_self_Tensor_dim_IntArrayRef, 3},
     {"_torch_cpp_torch_method_any_self_Tensor_dim_Dimname", (DL_FUNC) &_torch_cpp_torch_method_any_self_Tensor_dim_Dimname, 3},
     {"_torch_cpp_torch_method_argmax_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_argmax_self_Tensor, 3},
     {"_torch_cpp_torch_method_argmin_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_argmin_self_Tensor, 3},
@@ -45793,6 +47835,7 @@ static const R_CallMethodDef CallEntries[] = {
     {"_torch_cpp_torch_method_copysign__self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_method_copysign__self_Tensor_other_Tensor, 2},
     {"_torch_cpp_torch_method_copysign_self_Tensor_other_Scalar", (DL_FUNC) &_torch_cpp_torch_method_copysign_self_Tensor_other_Scalar, 2},
     {"_torch_cpp_torch_method_copysign__self_Tensor_other_Scalar", (DL_FUNC) &_torch_cpp_torch_method_copysign__self_Tensor_other_Scalar, 2},
+    {"_torch_cpp_torch_method__lazy_clone_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method__lazy_clone_self_Tensor, 1},
{"_torch_cpp_torch_method_logical_not_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_logical_not_self_Tensor, 1}, {"_torch_cpp_torch_method_logical_not__self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_logical_not__self_Tensor, 1}, {"_torch_cpp_torch_method_logical_xor_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_method_logical_xor_self_Tensor_other_Tensor, 2}, @@ -45914,13 +47957,13 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_method_gcd__self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_method_gcd__self_Tensor_other_Tensor, 2}, {"_torch_cpp_torch_method_lcm_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_method_lcm_self_Tensor_other_Tensor, 2}, {"_torch_cpp_torch_method_lcm__self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_method_lcm__self_Tensor_other_Tensor, 2}, - {"_torch_cpp_torch_method_index_self_Tensor_indices_constc10Listc10optionalTensor", (DL_FUNC) &_torch_cpp_torch_method_index_self_Tensor_indices_constc10Listc10optionalTensor, 2}, + {"_torch_cpp_torch_method_index_self_Tensor_indices_constc10ListstdoptionalTensor", (DL_FUNC) &_torch_cpp_torch_method_index_self_Tensor_indices_constc10ListstdoptionalTensor, 2}, {"_torch_cpp_torch_method_index_copy__self_Tensor_dim_int64_t_index_Tensor_source_Tensor", (DL_FUNC) &_torch_cpp_torch_method_index_copy__self_Tensor_dim_int64_t_index_Tensor_source_Tensor, 4}, {"_torch_cpp_torch_method_index_copy_self_Tensor_dim_int64_t_index_Tensor_source_Tensor", (DL_FUNC) &_torch_cpp_torch_method_index_copy_self_Tensor_dim_int64_t_index_Tensor_source_Tensor, 4}, {"_torch_cpp_torch_method_index_copy__self_Tensor_dim_Dimname_index_Tensor_source_Tensor", (DL_FUNC) &_torch_cpp_torch_method_index_copy__self_Tensor_dim_Dimname_index_Tensor_source_Tensor, 4}, {"_torch_cpp_torch_method_index_copy_self_Tensor_dim_Dimname_index_Tensor_source_Tensor", (DL_FUNC) &_torch_cpp_torch_method_index_copy_self_Tensor_dim_Dimname_index_Tensor_source_Tensor, 4}, - {"_torch_cpp_torch_method_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_method_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor, 4}, - {"_torch_cpp_torch_method_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_method_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor, 4}, + {"_torch_cpp_torch_method_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_method_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor, 4}, + {"_torch_cpp_torch_method_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_method_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor, 4}, {"_torch_cpp_torch_method_isclose_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_method_isclose_self_Tensor_other_Tensor, 5}, {"_torch_cpp_torch_method_isnan_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_isnan_self_Tensor, 1}, {"_torch_cpp_torch_method_is_distributed_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_is_distributed_self_Tensor, 1}, @@ -46055,6 +48098,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_method_detach__self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_detach__self_Tensor, 1}, {"_torch_cpp_torch_method_size_self_Tensor_dim_Dimname", (DL_FUNC) &_torch_cpp_torch_method_size_self_Tensor_dim_Dimname, 2}, {"_torch_cpp_torch_method_slice_self_Tensor", 
(DL_FUNC) &_torch_cpp_torch_method_slice_self_Tensor, 5}, + {"_torch_cpp_torch_method_slice_inverse_self_Tensor_src_Tensor", (DL_FUNC) &_torch_cpp_torch_method_slice_inverse_self_Tensor_src_Tensor, 6}, {"_torch_cpp_torch_method_slice_scatter_self_Tensor_src_Tensor", (DL_FUNC) &_torch_cpp_torch_method_slice_scatter_self_Tensor_src_Tensor, 6}, {"_torch_cpp_torch_method_select_scatter_self_Tensor_src_Tensor_dim_int64_t_index_int64_t", (DL_FUNC) &_torch_cpp_torch_method_select_scatter_self_Tensor_src_Tensor_dim_int64_t_index_int64_t, 4}, {"_torch_cpp_torch_method_diagonal_scatter_self_Tensor_src_Tensor", (DL_FUNC) &_torch_cpp_torch_method_diagonal_scatter_self_Tensor_src_Tensor, 5}, @@ -46118,7 +48162,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_method_rot90_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_rot90_self_Tensor, 3}, {"_torch_cpp_torch_method__nested_tensor_size_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method__nested_tensor_size_self_Tensor, 1}, {"_torch_cpp_torch_method__nested_tensor_strides_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method__nested_tensor_strides_self_Tensor, 1}, - {"_torch_cpp_torch_method__nested_tensor_offsets_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method__nested_tensor_offsets_self_Tensor, 1}, + {"_torch_cpp_torch_method__nested_tensor_storage_offsets_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method__nested_tensor_storage_offsets_self_Tensor, 1}, {"_torch_cpp_torch_method_trunc_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_trunc_self_Tensor, 1}, {"_torch_cpp_torch_method_trunc__self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_trunc__self_Tensor, 1}, {"_torch_cpp_torch_method_fix_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_fix_self_Tensor, 1}, @@ -46160,8 +48204,9 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_method_sparse_resize__self_Tensor_size_IntArrayRef_sparse_dim_int64_t_dense_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_method_sparse_resize__self_Tensor_size_IntArrayRef_sparse_dim_int64_t_dense_dim_int64_t, 4}, {"_torch_cpp_torch_method_sparse_resize_and_clear__self_Tensor_size_IntArrayRef_sparse_dim_int64_t_dense_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_method_sparse_resize_and_clear__self_Tensor_size_IntArrayRef_sparse_dim_int64_t_dense_dim_int64_t, 4}, {"_torch_cpp_torch_method_sparse_mask_self_Tensor_mask_Tensor", (DL_FUNC) &_torch_cpp_torch_method_sparse_mask_self_Tensor_mask_Tensor, 2}, - {"_torch_cpp_torch_method_to_dense_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_to_dense_self_Tensor, 2}, - {"_torch_cpp_torch_method__to_dense_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method__to_dense_self_Tensor, 2}, + {"_torch_cpp_torch_method__sparse_mask_projection_self_Tensor_mask_Tensor", (DL_FUNC) &_torch_cpp_torch_method__sparse_mask_projection_self_Tensor_mask_Tensor, 3}, + {"_torch_cpp_torch_method_to_dense_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_to_dense_self_Tensor, 3}, + {"_torch_cpp_torch_method__to_dense_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method__to_dense_self_Tensor, 3}, {"_torch_cpp_torch_method_sparse_dim_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_sparse_dim_self_Tensor, 1}, {"_torch_cpp_torch_method__dimI_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method__dimI_self_Tensor, 1}, {"_torch_cpp_torch_method_dense_dim_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_dense_dim_self_Tensor, 1}, @@ -46181,11 +48226,17 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_method_unbind_self_Tensor_dim_int64_t", (DL_FUNC) 
&_torch_cpp_torch_method_unbind_self_Tensor_dim_int64_t, 2}, {"_torch_cpp_torch_method_unbind_self_Tensor_dim_Dimname", (DL_FUNC) &_torch_cpp_torch_method_unbind_self_Tensor_dim_Dimname, 2}, {"_torch_cpp_torch_method_to_sparse_self_Tensor_sparse_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_method_to_sparse_self_Tensor_sparse_dim_int64_t, 2}, + {"_torch_cpp_torch_method__to_sparse_self_Tensor_sparse_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_method__to_sparse_self_Tensor_sparse_dim_int64_t, 2}, {"_torch_cpp_torch_method_to_sparse_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_to_sparse_self_Tensor, 4}, + {"_torch_cpp_torch_method__to_sparse_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method__to_sparse_self_Tensor, 4}, {"_torch_cpp_torch_method_to_sparse_csr_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_to_sparse_csr_self_Tensor, 2}, + {"_torch_cpp_torch_method__to_sparse_csr_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method__to_sparse_csr_self_Tensor, 2}, {"_torch_cpp_torch_method_to_sparse_csc_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_to_sparse_csc_self_Tensor, 2}, + {"_torch_cpp_torch_method__to_sparse_csc_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method__to_sparse_csc_self_Tensor, 2}, {"_torch_cpp_torch_method_to_sparse_bsr_self_Tensor_blocksize_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_method_to_sparse_bsr_self_Tensor_blocksize_IntArrayRef, 3}, + {"_torch_cpp_torch_method__to_sparse_bsr_self_Tensor_blocksize_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_method__to_sparse_bsr_self_Tensor_blocksize_IntArrayRef, 3}, {"_torch_cpp_torch_method_to_sparse_bsc_self_Tensor_blocksize_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_method_to_sparse_bsc_self_Tensor_blocksize_IntArrayRef, 3}, + {"_torch_cpp_torch_method__to_sparse_bsc_self_Tensor_blocksize_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_method__to_sparse_bsc_self_Tensor_blocksize_IntArrayRef, 3}, {"_torch_cpp_torch_method_to_mkldnn_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_to_mkldnn_self_Tensor, 2}, {"_torch_cpp_torch_method_dequantize_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_dequantize_self_Tensor, 1}, {"_torch_cpp_torch_method_q_scale_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_q_scale_self_Tensor, 1}, @@ -46356,6 +48407,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_method_index_select_self_Tensor_dim_Dimname_index_Tensor", (DL_FUNC) &_torch_cpp_torch_method_index_select_self_Tensor_dim_Dimname_index_Tensor, 3}, {"_torch_cpp_torch_method_masked_select_self_Tensor_mask_Tensor", (DL_FUNC) &_torch_cpp_torch_method_masked_select_self_Tensor_mask_Tensor, 2}, {"_torch_cpp_torch_method_nonzero_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_nonzero_self_Tensor, 1}, + {"_torch_cpp_torch_method_nonzero_static_self_Tensor_size_int64_t", (DL_FUNC) &_torch_cpp_torch_method_nonzero_static_self_Tensor_size_int64_t, 3}, {"_torch_cpp_torch_method_nonzero_numpy_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_nonzero_numpy_self_Tensor, 1}, {"_torch_cpp_torch_method_argwhere_self_Tensor", (DL_FUNC) &_torch_cpp_torch_method_argwhere_self_Tensor, 1}, {"_torch_cpp_torch_method_gather_self_Tensor_dim_int64_t_index_Tensor", (DL_FUNC) &_torch_cpp_torch_method_gather_self_Tensor_dim_int64_t_index_Tensor, 4}, @@ -46480,7 +48532,16 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__has_same_storage_numel_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__has_same_storage_numel_self_Tensor_other_Tensor, 2}, {"_torch_cpp_torch_namespace_align_tensors_tensors_TensorList", (DL_FUNC) 
&_torch_cpp_torch_namespace_align_tensors_tensors_TensorList, 1}, {"_torch_cpp_torch_namespace__assert_async_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__assert_async_self_Tensor, 1}, + {"_torch_cpp_torch_namespace__assert_async_self_Tensor_assert_msg_c10string_view", (DL_FUNC) &_torch_cpp_torch_namespace__assert_async_self_Tensor_assert_msg_c10string_view, 2}, + {"_torch_cpp_torch_namespace__assert_scalar_self_Scalar_assert_msg_c10string_view", (DL_FUNC) &_torch_cpp_torch_namespace__assert_scalar_self_Scalar_assert_msg_c10string_view, 2}, + {"_torch_cpp_torch_namespace__functional_assert_scalar_self_Scalar_assert_msg_c10string_view_dep_token_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__functional_assert_scalar_self_Scalar_assert_msg_c10string_view_dep_token_Tensor, 3}, + {"_torch_cpp_torch_namespace__functional_assert_async_self_Tensor_assert_msg_c10string_view_dep_token_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__functional_assert_async_self_Tensor_assert_msg_c10string_view_dep_token_Tensor, 3}, {"_torch_cpp_torch_namespace__assert_tensor_metadata_a_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__assert_tensor_metadata_a_Tensor, 4}, + {"_torch_cpp_torch_namespace__print_s_c10string_view", (DL_FUNC) &_torch_cpp_torch_namespace__print_s_c10string_view, 1}, + {"_torch_cpp_torch_namespace_sym_constrain_range_size_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_sym_constrain_range_size_Scalar, 3}, + {"_torch_cpp_torch_namespace_sym_constrain_range_for_size_size_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_sym_constrain_range_for_size_size_Scalar, 3}, + {"_torch_cpp_torch_namespace__functional_sym_constrain_range_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__functional_sym_constrain_range_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor, 4}, + {"_torch_cpp_torch_namespace__functional_sym_constrain_range_for_size_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__functional_sym_constrain_range_for_size_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor, 4}, {"_torch_cpp_torch_namespace__use_cudnn_ctc_loss_log_probs_Tensor_targets_Tensor_input_lengths_IntArrayRef_target_lengths_IntArrayRef_blank_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__use_cudnn_ctc_loss_log_probs_Tensor_targets_Tensor_input_lengths_IntArrayRef_target_lengths_IntArrayRef_blank_int64_t, 5}, {"_torch_cpp_torch_namespace__use_cudnn_ctc_loss_log_probs_Tensor_targets_Tensor_input_lengths_Tensor_target_lengths_Tensor_blank_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__use_cudnn_ctc_loss_log_probs_Tensor_targets_Tensor_input_lengths_Tensor_target_lengths_Tensor_blank_int64_t, 5}, {"_torch_cpp_torch_namespace__cudnn_ctc_loss_log_probs_Tensor_targets_Tensor_input_lengths_IntArrayRef_target_lengths_IntArrayRef_blank_int64_t_deterministic_bool_zero_infinity_bool", (DL_FUNC) &_torch_cpp_torch_namespace__cudnn_ctc_loss_log_probs_Tensor_targets_Tensor_input_lengths_IntArrayRef_target_lengths_IntArrayRef_blank_int64_t_deterministic_bool_zero_infinity_bool, 7}, @@ -46557,13 +48618,18 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__is_all_true_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__is_all_true_self_Tensor, 1}, {"_torch_cpp_torch_namespace__is_any_true_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__is_any_true_self_Tensor, 1}, {"_torch_cpp_torch_namespace__test_check_tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__test_check_tensor_self_Tensor, 1}, + 
{"_torch_cpp_torch_namespace__test_functorch_fallback_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__test_functorch_fallback_self_Tensor_other_Tensor, 2}, {"_torch_cpp_torch_namespace_all_self_Tensor_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_all_self_Tensor_dim_int64_t, 3}, + {"_torch_cpp_torch_namespace_all_self_Tensor_dim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_all_self_Tensor_dim_IntArrayRef, 3}, {"_torch_cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_int64_t, 4}, + {"_torch_cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_IntArrayRef, 4}, {"_torch_cpp_torch_namespace_all_self_Tensor_dim_Dimname", (DL_FUNC) &_torch_cpp_torch_namespace_all_self_Tensor_dim_Dimname, 3}, {"_torch_cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_Dimname", (DL_FUNC) &_torch_cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_Dimname, 4}, {"_torch_cpp_torch_namespace_allclose_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_allclose_self_Tensor_other_Tensor, 5}, {"_torch_cpp_torch_namespace_any_self_Tensor_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_any_self_Tensor_dim_int64_t, 3}, + {"_torch_cpp_torch_namespace_any_self_Tensor_dim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_any_self_Tensor_dim_IntArrayRef, 3}, {"_torch_cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_int64_t, 4}, + {"_torch_cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_IntArrayRef, 4}, {"_torch_cpp_torch_namespace_any_self_Tensor_dim_Dimname", (DL_FUNC) &_torch_cpp_torch_namespace_any_self_Tensor_dim_Dimname, 3}, {"_torch_cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_Dimname", (DL_FUNC) &_torch_cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_Dimname, 4}, {"_torch_cpp_torch_namespace_arange_end_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_arange_end_Scalar, 2}, @@ -46637,6 +48703,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_copysign_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_copysign_self_Tensor_other_Tensor, 2}, {"_torch_cpp_torch_namespace_copysign_self_Tensor_other_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_copysign_self_Tensor_other_Scalar, 2}, {"_torch_cpp_torch_namespace_copysign_out_out_Tensor_self_Tensor_other_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_copysign_out_out_Tensor_self_Tensor_other_Scalar, 3}, + {"_torch_cpp_torch_namespace__lazy_clone_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__lazy_clone_self_Tensor, 1}, {"_torch_cpp_torch_namespace_logical_not_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_logical_not_self_Tensor, 1}, {"_torch_cpp_torch_namespace_logical_not_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_logical_not_out_out_Tensor_self_Tensor, 2}, {"_torch_cpp_torch_namespace_logical_xor_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_logical_xor_self_Tensor_other_Tensor, 2}, @@ -46743,6 +48810,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_cudnn_batch_norm_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_exponential_average_factor_double_epsilon_double", (DL_FUNC) 
&_torch_cpp_torch_namespace_cudnn_batch_norm_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_exponential_average_factor_double_epsilon_double, 8}, {"_torch_cpp_torch_namespace_cudnn_batch_norm_backward_input_Tensor_grad_output_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_var_Tensor_epsilon_double_reserveSpace_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_cudnn_batch_norm_backward_input_Tensor_grad_output_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_var_Tensor_epsilon_double_reserveSpace_Tensor, 9}, {"_torch_cpp_torch_namespace_cudnn_convolution_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool", (DL_FUNC) &_torch_cpp_torch_namespace_cudnn_convolution_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool, 9}, + {"_torch_cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool", (DL_FUNC) &_torch_cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool, 10}, {"_torch_cpp_torch_namespace_cudnn_convolution_transpose_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool", (DL_FUNC) &_torch_cpp_torch_namespace_cudnn_convolution_transpose_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool, 10}, {"_torch_cpp_torch_namespace__mps_convolution_transpose_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__mps_convolution_transpose_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t, 7}, {"_torch_cpp_torch_namespace_mps_convolution_transpose_backward_self_Tensor_grad_output_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_output_mask_stdarraybool2", (DL_FUNC) &_torch_cpp_torch_namespace_mps_convolution_transpose_backward_self_Tensor_grad_output_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_output_mask_stdarraybool2, 9}, @@ -46831,6 +48899,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__embedding_bag_per_sample_weights_backward_grad_Tensor_weight_Tensor_indices_Tensor_offsets_Tensor_offset2bag_Tensor_mode_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__embedding_bag_per_sample_weights_backward_grad_Tensor_weight_Tensor_indices_Tensor_offsets_Tensor_offset2bag_Tensor_mode_int64_t, 7}, {"_torch_cpp_torch_namespace_empty_size_IntArrayRef_names_DimnameList", (DL_FUNC) &_torch_cpp_torch_namespace_empty_size_IntArrayRef_names_DimnameList, 4}, {"_torch_cpp_torch_namespace_empty_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_empty_size_IntArrayRef, 3}, + 
{"_torch_cpp_torch_namespace_empty_permuted_size_IntArrayRef_physical_layout_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_empty_permuted_size_IntArrayRef_physical_layout_IntArrayRef, 3}, {"_torch_cpp_torch_namespace__empty_affine_quantized_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__empty_affine_quantized_size_IntArrayRef, 5}, {"_torch_cpp_torch_namespace__empty_per_channel_affine_quantized_size_IntArrayRef_scales_Tensor_zero_points_Tensor_axis_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__empty_per_channel_affine_quantized_size_IntArrayRef_scales_Tensor_zero_points_Tensor_axis_int64_t, 6}, {"_torch_cpp_torch_namespace__resize_output__self_Tensor_size_IntArrayRef_device_Device", (DL_FUNC) &_torch_cpp_torch_namespace__resize_output__self_Tensor_size_IntArrayRef_device_Device, 3}, @@ -46914,18 +48983,18 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__fft_c2c_self_Tensor_dim_IntArrayRef_normalization_int64_t_forward_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fft_c2c_self_Tensor_dim_IntArrayRef_normalization_int64_t_forward_bool, 4}, {"_torch_cpp_torch_namespace__fft_c2c_out_out_Tensor_self_Tensor_dim_IntArrayRef_normalization_int64_t_forward_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fft_c2c_out_out_Tensor_self_Tensor_dim_IntArrayRef_normalization_int64_t_forward_bool, 5}, {"_torch_cpp_torch_namespace__validate_compressed_sparse_indices_is_crow_bool_compressed_idx_Tensor_plain_idx_Tensor_cdim_int64_t_dim_int64_t_nnz_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__validate_compressed_sparse_indices_is_crow_bool_compressed_idx_Tensor_plain_idx_Tensor_cdim_int64_t_dim_int64_t_nnz_int64_t, 6}, - {"_torch_cpp_torch_namespace__cufft_get_plan_cache_size_device_index_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__cufft_get_plan_cache_size_device_index_int64_t, 1}, - {"_torch_cpp_torch_namespace__cufft_get_plan_cache_max_size_device_index_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__cufft_get_plan_cache_max_size_device_index_int64_t, 1}, - {"_torch_cpp_torch_namespace__cufft_set_plan_cache_max_size_device_index_int64_t_max_size_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__cufft_set_plan_cache_max_size_device_index_int64_t_max_size_int64_t, 2}, - {"_torch_cpp_torch_namespace__cufft_clear_plan_cache_device_index_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__cufft_clear_plan_cache_device_index_int64_t, 1}, - {"_torch_cpp_torch_namespace_index_self_Tensor_indices_constc10Listc10optionalTensor", (DL_FUNC) &_torch_cpp_torch_namespace_index_self_Tensor_indices_constc10Listc10optionalTensor, 2}, - {"_torch_cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor", (DL_FUNC) &_torch_cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor, 3}, + {"_torch_cpp_torch_namespace_index_self_Tensor_indices_constc10ListstdoptionalTensor", (DL_FUNC) &_torch_cpp_torch_namespace_index_self_Tensor_indices_constc10ListstdoptionalTensor, 2}, + {"_torch_cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor", (DL_FUNC) &_torch_cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor, 3}, + {"_torch_cpp_torch_namespace__unsafe_index_self_Tensor_indices_constc10ListstdoptionalTensor", (DL_FUNC) &_torch_cpp_torch_namespace__unsafe_index_self_Tensor_indices_constc10ListstdoptionalTensor, 2}, + {"_torch_cpp_torch_namespace__unsafe_masked_index_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_fill_Scalar", 
(DL_FUNC) &_torch_cpp_torch_namespace__unsafe_masked_index_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_fill_Scalar, 4}, + {"_torch_cpp_torch_namespace__unsafe_masked_index_put_accumulate_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__unsafe_masked_index_put_accumulate_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor, 4}, {"_torch_cpp_torch_namespace_index_copy_out_out_Tensor_self_Tensor_dim_int64_t_index_Tensor_source_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_index_copy_out_out_Tensor_self_Tensor_dim_int64_t_index_Tensor_source_Tensor, 5}, {"_torch_cpp_torch_namespace_index_copy_self_Tensor_dim_int64_t_index_Tensor_source_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_index_copy_self_Tensor_dim_int64_t_index_Tensor_source_Tensor, 4}, {"_torch_cpp_torch_namespace_index_copy_self_Tensor_dim_Dimname_index_Tensor_source_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_index_copy_self_Tensor_dim_Dimname_index_Tensor_source_Tensor, 4}, - {"_torch_cpp_torch_namespace_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor, 4}, - {"_torch_cpp_torch_namespace_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor, 4}, - {"_torch_cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor, 5}, + {"_torch_cpp_torch_namespace_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor, 4}, + {"_torch_cpp_torch_namespace_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor, 4}, + {"_torch_cpp_torch_namespace__unsafe_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__unsafe_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor, 4}, + {"_torch_cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor, 5}, {"_torch_cpp_torch_namespace_instance_norm_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_use_input_stats_bool_momentum_double_eps_double_cudnn_enabled_bool", (DL_FUNC) &_torch_cpp_torch_namespace_instance_norm_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_use_input_stats_bool_momentum_double_eps_double_cudnn_enabled_bool, 9}, {"_torch_cpp_torch_namespace_isclose_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_isclose_self_Tensor_other_Tensor, 5}, {"_torch_cpp_torch_namespace_isin_out_out_Tensor_elements_Tensor_test_elements_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_isin_out_out_Tensor_elements_Tensor_test_elements_Tensor, 5}, @@ -46956,6 +49025,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_layer_norm_input_Tensor_normalized_shape_IntArrayRef", 
(DL_FUNC) &_torch_cpp_torch_namespace_layer_norm_input_Tensor_normalized_shape_IntArrayRef, 6}, {"_torch_cpp_torch_namespace_native_layer_norm_input_Tensor_normalized_shape_IntArrayRef_weight_Tensor_bias_Tensor_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace_native_layer_norm_input_Tensor_normalized_shape_IntArrayRef_weight_Tensor_bias_Tensor_eps_double, 5}, {"_torch_cpp_torch_namespace_native_layer_norm_backward_grad_out_Tensor_input_Tensor_normalized_shape_IntArrayRef_mean_Tensor_rstd_Tensor_weight_Tensor_bias_Tensor_output_mask_stdarraybool3", (DL_FUNC) &_torch_cpp_torch_namespace_native_layer_norm_backward_grad_out_Tensor_input_Tensor_normalized_shape_IntArrayRef_mean_Tensor_rstd_Tensor_weight_Tensor_bias_Tensor_output_mask_stdarraybool3, 8}, + {"_torch_cpp_torch_namespace_rms_norm_input_Tensor_normalized_shape_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_rms_norm_input_Tensor_normalized_shape_IntArrayRef, 4}, {"_torch_cpp_torch_namespace_nan_to_num_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_nan_to_num_self_Tensor, 4}, {"_torch_cpp_torch_namespace_nan_to_num__self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_nan_to_num__self_Tensor, 4}, {"_torch_cpp_torch_namespace_nan_to_num_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_nan_to_num_out_out_Tensor_self_Tensor, 5}, @@ -46966,10 +49036,22 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_mkldnn_linear_backward_input_input_size_IntArrayRef_grad_output_Tensor_weight_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_mkldnn_linear_backward_input_input_size_IntArrayRef_grad_output_Tensor_weight_Tensor, 3}, {"_torch_cpp_torch_namespace_mkldnn_linear_backward_weights_grad_output_Tensor_input_Tensor_weight_Tensor_bias_defined_bool", (DL_FUNC) &_torch_cpp_torch_namespace_mkldnn_linear_backward_weights_grad_output_Tensor_input_Tensor_weight_Tensor_bias_defined_bool, 4}, {"_torch_cpp_torch_namespace_mkldnn_linear_backward_self_Tensor_grad_output_Tensor_weight_Tensor_output_mask_stdarraybool3", (DL_FUNC) &_torch_cpp_torch_namespace_mkldnn_linear_backward_self_Tensor_grad_output_Tensor_weight_Tensor_output_mask_stdarraybool3, 4}, + {"_torch_cpp_torch_namespace__cslt_compress_input_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__cslt_compress_input_Tensor, 1}, + {"_torch_cpp_torch_namespace__cslt_sparse_mm_compressed_A_Tensor_dense_B_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__cslt_sparse_mm_compressed_A_Tensor_dense_B_Tensor, 7}, + {"_torch_cpp_torch_namespace__cslt_sparse_mm_search_compressed_A_Tensor_dense_B_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__cslt_sparse_mm_search_compressed_A_Tensor_dense_B_Tensor, 6}, + {"_torch_cpp_torch_namespace__sparse_semi_structured_tile_input_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_semi_structured_tile_input_Tensor, 3}, + {"_torch_cpp_torch_namespace__sparse_semi_structured_apply_input_Tensor_thread_masks_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_semi_structured_apply_input_Tensor_thread_masks_Tensor, 2}, + {"_torch_cpp_torch_namespace__sparse_semi_structured_apply_dense_input_Tensor_thread_masks_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_semi_structured_apply_dense_input_Tensor_thread_masks_Tensor, 2}, + {"_torch_cpp_torch_namespace__sparse_semi_structured_linear_input_Tensor_weight_Tensor_meta_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_semi_structured_linear_input_Tensor_weight_Tensor_meta_Tensor, 6}, + 
{"_torch_cpp_torch_namespace__sparse_semi_structured_mm_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_semi_structured_mm_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor, 4}, + {"_torch_cpp_torch_namespace__sparse_semi_structured_addmm_input_Tensor_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_semi_structured_addmm_input_Tensor_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor, 7}, + {"_torch_cpp_torch_namespace__mixed_dtypes_linear_input_Tensor_weight_Tensor_scale_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__mixed_dtypes_linear_input_Tensor_weight_Tensor_scale_Tensor, 5}, {"_torch_cpp_torch_namespace_fbgemm_linear_int8_weight_fp32_activation_input_Tensor_weight_Tensor_packed_Tensor_col_offsets_Tensor_weight_scale_Scalar_weight_zero_point_Scalar_bias_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_fbgemm_linear_int8_weight_fp32_activation_input_Tensor_weight_Tensor_packed_Tensor_col_offsets_Tensor_weight_scale_Scalar_weight_zero_point_Scalar_bias_Tensor, 7}, {"_torch_cpp_torch_namespace_fbgemm_linear_int8_weight_input_Tensor_weight_Tensor_packed_Tensor_col_offsets_Tensor_weight_scale_Scalar_weight_zero_point_Scalar_bias_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_fbgemm_linear_int8_weight_input_Tensor_weight_Tensor_packed_Tensor_col_offsets_Tensor_weight_scale_Scalar_weight_zero_point_Scalar_bias_Tensor, 7}, {"_torch_cpp_torch_namespace_fbgemm_linear_quantize_weight_input_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_fbgemm_linear_quantize_weight_input_Tensor, 1}, {"_torch_cpp_torch_namespace_fbgemm_pack_gemm_matrix_fp16_input_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_fbgemm_pack_gemm_matrix_fp16_input_Tensor, 1}, + {"_torch_cpp_torch_namespace__wrapped_linear_prepack_weight_Tensor_weight_scale_Tensor_weight_zero_point_Tensor_bias_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__wrapped_linear_prepack_weight_Tensor_weight_scale_Tensor_weight_zero_point_Tensor_bias_Tensor, 4}, + {"_torch_cpp_torch_namespace__wrapped_quantized_linear_prepacked_input_Tensor_input_scale_Tensor_input_zero_point_Tensor_packed_weight_Tensor_output_scale_Tensor_output_zero_point_Tensor_out_channel_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__wrapped_quantized_linear_prepacked_input_Tensor_input_scale_Tensor_input_zero_point_Tensor_packed_weight_Tensor_output_scale_Tensor_output_zero_point_Tensor_out_channel_int64_t, 7}, {"_torch_cpp_torch_namespace_fbgemm_linear_fp16_weight_fp32_activation_input_Tensor_packed_weight_Tensor_bias_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_fbgemm_linear_fp16_weight_fp32_activation_input_Tensor_packed_weight_Tensor_bias_Tensor, 3}, {"_torch_cpp_torch_namespace_fbgemm_linear_fp16_weight_input_Tensor_packed_weight_Tensor_bias_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_fbgemm_linear_fp16_weight_input_Tensor_packed_weight_Tensor_bias_Tensor, 3}, {"_torch_cpp_torch_namespace_fbgemm_pack_quantized_matrix_input_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_fbgemm_pack_quantized_matrix_input_Tensor, 1}, @@ -46978,7 +49060,13 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_ldexp__self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_ldexp__self_Tensor_other_Tensor, 2}, {"_torch_cpp_torch_namespace_ldexp_out_out_Tensor_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_ldexp_out_out_Tensor_self_Tensor_other_Tensor, 3}, {"_torch_cpp_torch_namespace_linspace_start_Scalar_end_Scalar_steps_int64_t", (DL_FUNC) 
&_torch_cpp_torch_namespace_linspace_start_Scalar_end_Scalar_steps_int64_t, 4}, + {"_torch_cpp_torch_namespace_linspace_start_Tensor_end_Tensor_steps_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_linspace_start_Tensor_end_Tensor_steps_int64_t, 4}, + {"_torch_cpp_torch_namespace_linspace_start_Tensor_end_Scalar_steps_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_linspace_start_Tensor_end_Scalar_steps_int64_t, 4}, + {"_torch_cpp_torch_namespace_linspace_start_Scalar_end_Tensor_steps_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_linspace_start_Scalar_end_Tensor_steps_int64_t, 4}, {"_torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t, 4}, + {"_torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t, 4}, + {"_torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t, 4}, + {"_torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t, 4}, {"_torch_cpp_torch_namespace_log_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_log_self_Tensor, 1}, {"_torch_cpp_torch_namespace_log__self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_log__self_Tensor, 1}, {"_torch_cpp_torch_namespace_log_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_log_out_out_Tensor_self_Tensor, 2}, @@ -47004,7 +49092,13 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_xlogy_out_out_Tensor_self_Scalar_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_xlogy_out_out_Tensor_self_Scalar_other_Tensor, 3}, {"_torch_cpp_torch_namespace_xlogy_out_out_Tensor_self_Tensor_other_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_xlogy_out_out_Tensor_self_Tensor_other_Scalar, 3}, {"_torch_cpp_torch_namespace_logspace_start_Scalar_end_Scalar_steps_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_logspace_start_Scalar_end_Scalar_steps_int64_t, 5}, + {"_torch_cpp_torch_namespace_logspace_start_Tensor_end_Tensor_steps_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_logspace_start_Tensor_end_Tensor_steps_int64_t, 5}, + {"_torch_cpp_torch_namespace_logspace_start_Tensor_end_Scalar_steps_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_logspace_start_Tensor_end_Scalar_steps_int64_t, 5}, + {"_torch_cpp_torch_namespace_logspace_start_Scalar_end_Tensor_steps_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_logspace_start_Scalar_end_Tensor_steps_int64_t, 5}, {"_torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t, 5}, + {"_torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t, 5}, + {"_torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t, 5}, + {"_torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t", (DL_FUNC) 
&_torch_cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t, 5}, {"_torch_cpp_torch_namespace_log_softmax_self_Tensor_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_log_softmax_self_Tensor_dim_int64_t, 3}, {"_torch_cpp_torch_namespace_log_softmax_out_out_Tensor_self_Tensor_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_log_softmax_out_out_Tensor_self_Tensor_dim_int64_t, 4}, {"_torch_cpp_torch_namespace_log_softmax_self_Tensor_dim_Dimname", (DL_FUNC) &_torch_cpp_torch_namespace_log_softmax_self_Tensor_dim_Dimname, 3}, @@ -47053,8 +49147,10 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_mkldnn_max_pool3d_backward_grad_output_Tensor_output_Tensor_input_Tensor_kernel_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_mkldnn_max_pool3d_backward_grad_output_Tensor_output_Tensor_input_Tensor_kernel_size_IntArrayRef, 8}, {"_torch_cpp_torch_namespace_quantized_max_pool1d_self_Tensor_kernel_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_quantized_max_pool1d_self_Tensor_kernel_size_IntArrayRef, 6}, {"_torch_cpp_torch_namespace_quantized_max_pool2d_self_Tensor_kernel_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_quantized_max_pool2d_self_Tensor_kernel_size_IntArrayRef, 6}, + {"_torch_cpp_torch_namespace_quantized_max_pool3d_self_Tensor_kernel_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_quantized_max_pool3d_self_Tensor_kernel_size_IntArrayRef, 6}, {"_torch_cpp_torch_namespace_max_pool3d_self_Tensor_kernel_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_max_pool3d_self_Tensor_kernel_size_IntArrayRef, 6}, {"_torch_cpp_torch_namespace_mean_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_mean_self_Tensor, 2}, + {"_torch_cpp_torch_namespace_mean_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_mean_out_out_Tensor_self_Tensor, 3}, {"_torch_cpp_torch_namespace_mean_self_Tensor_dim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_mean_self_Tensor_dim_IntArrayRef, 4}, {"_torch_cpp_torch_namespace_mean_out_out_Tensor_self_Tensor_dim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_mean_out_out_Tensor_self_Tensor_dim_IntArrayRef, 5}, {"_torch_cpp_torch_namespace_mean_self_Tensor_dim_DimnameList", (DL_FUNC) &_torch_cpp_torch_namespace_mean_self_Tensor_dim_DimnameList, 4}, @@ -47093,6 +49189,11 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_miopen_rnn_backward_input_Tensor_weight_TensorList_weight_stride0_int64_t_weight_buf_Tensor_hx_Tensor_cx_Tensor_output_Tensor_grad_output_Tensor_grad_hy_Tensor_grad_cy_Tensor_mode_int64_t_hidden_size_int64_t_num_layers_int64_t_batch_first_bool_dropout_double_train_bool_bidirectional_bool_batch_sizes_IntArrayRef_dropout_state_Tensor_reserve_Tensor_output_mask_stdarraybool4", (DL_FUNC) &_torch_cpp_torch_namespace_miopen_rnn_backward_input_Tensor_weight_TensorList_weight_stride0_int64_t_weight_buf_Tensor_hx_Tensor_cx_Tensor_output_Tensor_grad_output_Tensor_grad_hy_Tensor_grad_cy_Tensor_mode_int64_t_hidden_size_int64_t_num_layers_int64_t_batch_first_bool_dropout_double_train_bool_bidirectional_bool_batch_sizes_IntArrayRef_dropout_state_Tensor_reserve_Tensor_output_mask_stdarraybool4, 21}, {"_torch_cpp_torch_namespace_mm_self_Tensor_mat2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_mm_self_Tensor_mat2_Tensor, 2}, {"_torch_cpp_torch_namespace_mm_out_out_Tensor_self_Tensor_mat2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_mm_out_out_Tensor_self_Tensor_mat2_Tensor, 3}, + 
{"_torch_cpp_torch_namespace__int_mm_self_Tensor_mat2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__int_mm_self_Tensor_mat2_Tensor, 2}, + {"_torch_cpp_torch_namespace__int_mm_out_out_Tensor_self_Tensor_mat2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__int_mm_out_out_Tensor_self_Tensor_mat2_Tensor, 3}, + {"_torch_cpp_torch_namespace__convert_weight_to_int4pack_self_Tensor_innerKTiles_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__convert_weight_to_int4pack_self_Tensor_innerKTiles_int64_t, 2}, + {"_torch_cpp_torch_namespace__weight_int4pack_mm_self_Tensor_mat2_Tensor_qGroupSize_int64_t_qScaleAndZeros_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__weight_int4pack_mm_self_Tensor_mat2_Tensor_qGroupSize_int64_t_qScaleAndZeros_Tensor, 4}, + {"_torch_cpp_torch_namespace__weight_int8pack_mm_self_Tensor_mat2_Tensor_scales_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__weight_int8pack_mm_self_Tensor_mat2_Tensor_scales_Tensor, 3}, {"_torch_cpp_torch_namespace__sparse_mm_sparse_Tensor_dense_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_mm_sparse_Tensor_dense_Tensor, 2}, {"_torch_cpp_torch_namespace__sparse_mm_sparse_Tensor_dense_Tensor_reduce_c10string_view", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_mm_sparse_Tensor_dense_Tensor_reduce_c10string_view, 3}, {"_torch_cpp_torch_namespace__sparse_sparse_matmul_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_sparse_matmul_self_Tensor_other_Tensor, 2}, @@ -47117,6 +49218,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_native_batch_norm_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace_native_batch_norm_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double, 8}, {"_torch_cpp_torch_namespace_native_batch_norm_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace_native_batch_norm_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double, 11}, {"_torch_cpp_torch_namespace__native_batch_norm_legit_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace__native_batch_norm_legit_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double, 8}, + {"_torch_cpp_torch_namespace__native_batch_norm_legit_no_training_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace__native_batch_norm_legit_no_training_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double, 7}, {"_torch_cpp_torch_namespace__native_batch_norm_legit_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace__native_batch_norm_legit_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double, 11}, 
{"_torch_cpp_torch_namespace__native_batch_norm_legit_input_Tensor_weight_Tensor_bias_Tensor_training_bool_momentum_double_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace__native_batch_norm_legit_input_Tensor_weight_Tensor_bias_Tensor_training_bool_momentum_double_eps_double, 6}, {"_torch_cpp_torch_namespace__native_batch_norm_legit_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_input_Tensor_weight_Tensor_bias_Tensor_training_bool_momentum_double_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace__native_batch_norm_legit_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_input_Tensor_weight_Tensor_bias_Tensor_training_bool_momentum_double_eps_double, 9}, @@ -47127,7 +49229,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_batch_norm_gather_stats_with_counts_input_Tensor_mean_Tensor_invstd_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double_counts_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_batch_norm_gather_stats_with_counts_input_Tensor_mean_Tensor_invstd_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double_counts_Tensor, 8}, {"_torch_cpp_torch_namespace_native_batch_norm_backward_grad_out_Tensor_input_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_invstd_Tensor_train_bool_eps_double_output_mask_stdarraybool3", (DL_FUNC) &_torch_cpp_torch_namespace_native_batch_norm_backward_grad_out_Tensor_input_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_invstd_Tensor_train_bool_eps_double_output_mask_stdarraybool3, 10}, {"_torch_cpp_torch_namespace_batch_norm_backward_reduce_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_input_g_bool_weight_g_bool_bias_g_bool", (DL_FUNC) &_torch_cpp_torch_namespace_batch_norm_backward_reduce_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_input_g_bool_weight_g_bool_bias_g_bool, 8}, - {"_torch_cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor, 8}, + {"_torch_cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor, 8}, {"_torch_cpp_torch_namespace_batch_norm_update_stats_input_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double", (DL_FUNC) &_torch_cpp_torch_namespace_batch_norm_update_stats_input_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double, 4}, {"_torch_cpp_torch_namespace__nnpack_spatial_convolution_input_Tensor_weight_Tensor_bias_Tensor_padding_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__nnpack_spatial_convolution_input_Tensor_weight_Tensor_bias_Tensor_padding_IntArrayRef, 5}, {"_torch_cpp_torch_namespace_ones_size_IntArrayRef_names_DimnameList", (DL_FUNC) &_torch_cpp_torch_namespace_ones_size_IntArrayRef_names_DimnameList, 3}, @@ -47276,6 +49378,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_size_self_Tensor_dim_Dimname", (DL_FUNC) &_torch_cpp_torch_namespace_size_self_Tensor_dim_Dimname, 2}, 
{"_torch_cpp_torch_namespace_slice_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_slice_self_Tensor, 5}, {"_torch_cpp_torch_namespace_slice_backward_grad_output_Tensor_input_sizes_IntArrayRef_dim_int64_t_start_int64_t_end_int64_t_step_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_slice_backward_grad_output_Tensor_input_sizes_IntArrayRef_dim_int64_t_start_int64_t_end_int64_t_step_int64_t, 6}, + {"_torch_cpp_torch_namespace_slice_inverse_self_Tensor_src_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_slice_inverse_self_Tensor_src_Tensor, 6}, {"_torch_cpp_torch_namespace_slice_scatter_self_Tensor_src_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_slice_scatter_self_Tensor_src_Tensor, 6}, {"_torch_cpp_torch_namespace_select_scatter_self_Tensor_src_Tensor_dim_int64_t_index_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_select_scatter_self_Tensor_src_Tensor_dim_int64_t_index_int64_t, 4}, {"_torch_cpp_torch_namespace_diagonal_scatter_self_Tensor_src_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_diagonal_scatter_self_Tensor_src_Tensor, 5}, @@ -47305,6 +49408,8 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_squeeze_self_Tensor_dim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_squeeze_self_Tensor_dim_IntArrayRef, 2}, {"_torch_cpp_torch_namespace_sspaddmm_self_Tensor_mat1_Tensor_mat2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_sspaddmm_self_Tensor_mat1_Tensor_mat2_Tensor, 5}, {"_torch_cpp_torch_namespace_sspaddmm_out_out_Tensor_self_Tensor_mat1_Tensor_mat2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_sspaddmm_out_out_Tensor_self_Tensor_mat1_Tensor_mat2_Tensor, 6}, + {"_torch_cpp_torch_namespace__chunk_cat_tensors_TensorList_dim_int64_t_num_chunks_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__chunk_cat_tensors_TensorList_dim_int64_t_num_chunks_int64_t, 3}, + {"_torch_cpp_torch_namespace__chunk_cat_out_out_Tensor_tensors_TensorList_dim_int64_t_num_chunks_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__chunk_cat_out_out_Tensor_tensors_TensorList_dim_int64_t_num_chunks_int64_t, 4}, {"_torch_cpp_torch_namespace_stack_tensors_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace_stack_tensors_TensorList, 2}, {"_torch_cpp_torch_namespace_stack_out_out_Tensor_tensors_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace_stack_out_out_Tensor_tensors_TensorList, 3}, {"_torch_cpp_torch_namespace__stack_tensors_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__stack_tensors_TensorList, 2}, @@ -47380,8 +49485,19 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__nested_tensor_from_mask_left_aligned_t_Tensor_mask_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_tensor_from_mask_left_aligned_t_Tensor_mask_Tensor, 2}, {"_torch_cpp_torch_namespace__nested_from_padded_padded_Tensor_cpu_nested_shape_example_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_from_padded_padded_Tensor_cpu_nested_shape_example_Tensor, 3}, {"_torch_cpp_torch_namespace__nested_from_padded_and_nested_example_padded_Tensor_nt_example_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_from_padded_and_nested_example_padded_Tensor_nt_example_Tensor, 2}, - {"_torch_cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef, 4}, - {"_torch_cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef", 
(DL_FUNC) &_torch_cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef, 4}, + {"_torch_cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor, 4}, + {"_torch_cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor, 4}, + {"_torch_cpp_torch_namespace__nested_view_from_jagged_self_Tensor_offsets_Tensor_dummy_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_view_from_jagged_self_Tensor_offsets_Tensor_dummy_Tensor, 7}, + {"_torch_cpp_torch_namespace__nested_view_from_jagged_copy_self_Tensor_offsets_Tensor_dummy_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_view_from_jagged_copy_self_Tensor_offsets_Tensor_dummy_Tensor, 7}, + {"_torch_cpp_torch_namespace__nested_get_values_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_get_values_self_Tensor, 1}, + {"_torch_cpp_torch_namespace__nested_get_values_copy_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_get_values_copy_self_Tensor, 1}, + {"_torch_cpp_torch_namespace__nested_get_offsets_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_get_offsets_self_Tensor, 1}, + {"_torch_cpp_torch_namespace__nested_get_lengths_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_get_lengths_self_Tensor, 1}, + {"_torch_cpp_torch_namespace__nested_get_ragged_idx_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_get_ragged_idx_self_Tensor, 1}, + {"_torch_cpp_torch_namespace__nested_get_min_seqlen_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_get_min_seqlen_self_Tensor, 1}, + {"_torch_cpp_torch_namespace__nested_get_max_seqlen_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_get_max_seqlen_self_Tensor, 1}, + {"_torch_cpp_torch_namespace__nested_get_jagged_dummy_any_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_get_jagged_dummy_any_Tensor, 1}, + {"_torch_cpp_torch_namespace__nested_compute_contiguous_strides_offsets_nested_size_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_compute_contiguous_strides_offsets_nested_size_Tensor, 1}, {"_torch_cpp_torch_namespace__trilinear_i1_Tensor_i2_Tensor_i3_Tensor_expand1_IntArrayRef_expand2_IntArrayRef_expand3_IntArrayRef_sumdim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__trilinear_i1_Tensor_i2_Tensor_i3_Tensor_expand1_IntArrayRef_expand2_IntArrayRef_expand3_IntArrayRef_sumdim_IntArrayRef, 8}, {"_torch_cpp_torch_namespace_triplet_margin_loss_anchor_Tensor_positive_Tensor_negative_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_triplet_margin_loss_anchor_Tensor_positive_Tensor_negative_Tensor, 8}, {"_torch_cpp_torch_namespace_trunc_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_trunc_self_Tensor, 1}, @@ -47431,6 +49547,10 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_binomial_count_Tensor_prob_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_binomial_count_Tensor_prob_Tensor, 3}, {"_torch_cpp_torch_namespace_native_norm_self_Tensor_p_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_native_norm_self_Tensor_p_Scalar, 2}, {"_torch_cpp_torch_namespace_native_norm_self_Tensor_p_Scalar_dim_IntArrayRef_keepdim_bool_dtype_ScalarType", 
(DL_FUNC) &_torch_cpp_torch_namespace_native_norm_self_Tensor_p_Scalar_dim_IntArrayRef_keepdim_bool_dtype_ScalarType, 5}, + {"_torch_cpp_torch_namespace__batch_norm_with_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace__batch_norm_with_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double, 7}, + {"_torch_cpp_torch_namespace__batch_norm_with_update_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_reserve_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace__batch_norm_with_update_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_reserve_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double, 11}, + {"_torch_cpp_torch_namespace__batch_norm_no_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace__batch_norm_no_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double, 7}, + {"_torch_cpp_torch_namespace_batch_norm_backward_grad_out_Tensor_input_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_var_Tensor_update_bool_eps_double_output_mask_stdarraybool3_reserve_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_batch_norm_backward_grad_out_Tensor_input_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_var_Tensor_update_bool_eps_double_output_mask_stdarraybool3_reserve_Tensor, 11}, {"_torch_cpp_torch_namespace__sparse_sum_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_sum_self_Tensor, 1}, {"_torch_cpp_torch_namespace__sparse_sum_self_Tensor_dtype_ScalarType", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_sum_self_Tensor_dtype_ScalarType, 2}, {"_torch_cpp_torch_namespace__sparse_sum_self_Tensor_dim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_sum_self_Tensor_dim_IntArrayRef, 2}, @@ -47489,6 +49609,9 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_addmm_self_Tensor_mat1_Tensor_mat2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_addmm_self_Tensor_mat1_Tensor_mat2_Tensor, 5}, {"_torch_cpp_torch_namespace__addmm_activation_out_out_Tensor_self_Tensor_mat1_Tensor_mat2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__addmm_activation_out_out_Tensor_self_Tensor_mat1_Tensor_mat2_Tensor, 7}, {"_torch_cpp_torch_namespace__addmm_activation_self_Tensor_mat1_Tensor_mat2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__addmm_activation_self_Tensor_mat1_Tensor_mat2_Tensor, 6}, + {"_torch_cpp_torch_namespace__scaled_mm_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_mm_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor, 8}, + {"_torch_cpp_torch_namespace__scaled_mm_out_out_Tensor_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_mm_out_out_Tensor_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor, 9}, + {"_torch_cpp_torch_namespace__sparse_compressed_tensor_with_dims_nnz_int64_t_dense_dim_int64_t_size_IntArrayRef_blocksize_IntArrayRef_index_dtype_ScalarType_options_TensorOptions", (DL_FUNC) 
&_torch_cpp_torch_namespace__sparse_compressed_tensor_with_dims_nnz_int64_t_dense_dim_int64_t_size_IntArrayRef_blocksize_IntArrayRef_index_dtype_ScalarType_options_TensorOptions, 6}, {"_torch_cpp_torch_namespace_sparse_compressed_tensor_compressed_indices_Tensor_plain_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions", (DL_FUNC) &_torch_cpp_torch_namespace_sparse_compressed_tensor_compressed_indices_Tensor_plain_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions, 5}, {"_torch_cpp_torch_namespace_sparse_csr_tensor_crow_indices_Tensor_col_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions", (DL_FUNC) &_torch_cpp_torch_namespace_sparse_csr_tensor_crow_indices_Tensor_col_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions, 5}, {"_torch_cpp_torch_namespace_sparse_csc_tensor_ccol_indices_Tensor_row_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions", (DL_FUNC) &_torch_cpp_torch_namespace_sparse_csc_tensor_ccol_indices_Tensor_row_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions, 5}, @@ -47505,27 +49628,28 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__sparse_bsr_tensor_unsafe_crow_indices_Tensor_col_indices_Tensor_values_Tensor_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_bsr_tensor_unsafe_crow_indices_Tensor_col_indices_Tensor_values_Tensor_size_IntArrayRef, 5}, {"_torch_cpp_torch_namespace__sparse_bsc_tensor_unsafe_ccol_indices_Tensor_row_indices_Tensor_values_Tensor_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_bsc_tensor_unsafe_ccol_indices_Tensor_row_indices_Tensor_values_Tensor_size_IntArrayRef, 5}, {"_torch_cpp_torch_namespace_sparse_coo_tensor_size_IntArrayRef_options_TensorOptions", (DL_FUNC) &_torch_cpp_torch_namespace_sparse_coo_tensor_size_IntArrayRef_options_TensorOptions, 2}, - {"_torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions", (DL_FUNC) &_torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions, 3}, - {"_torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions", (DL_FUNC) &_torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions, 4}, - {"_torch_cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef, 4}, - {"_torch_cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef, 3}, + {"_torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions", (DL_FUNC) &_torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions, 4}, + {"_torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions", (DL_FUNC) &_torch_cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions, 5}, + {"_torch_cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef, 5}, + 
{"_torch_cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef, 4}, {"_torch_cpp_torch_namespace__validate_sparse_compressed_tensor_args_compressed_indices_Tensor_plain_indices_Tensor_values_Tensor_size_IntArrayRef_layout_Layout", (DL_FUNC) &_torch_cpp_torch_namespace__validate_sparse_compressed_tensor_args_compressed_indices_Tensor_plain_indices_Tensor_values_Tensor_size_IntArrayRef_layout_Layout, 5}, {"_torch_cpp_torch_namespace__validate_sparse_csr_tensor_args_crow_indices_Tensor_col_indices_Tensor_values_Tensor_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__validate_sparse_csr_tensor_args_crow_indices_Tensor_col_indices_Tensor_values_Tensor_size_IntArrayRef, 4}, {"_torch_cpp_torch_namespace__validate_sparse_csc_tensor_args_ccol_indices_Tensor_row_indices_Tensor_values_Tensor_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__validate_sparse_csc_tensor_args_ccol_indices_Tensor_row_indices_Tensor_values_Tensor_size_IntArrayRef, 4}, {"_torch_cpp_torch_namespace__validate_sparse_bsr_tensor_args_crow_indices_Tensor_col_indices_Tensor_values_Tensor_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__validate_sparse_bsr_tensor_args_crow_indices_Tensor_col_indices_Tensor_values_Tensor_size_IntArrayRef, 4}, {"_torch_cpp_torch_namespace__validate_sparse_bsc_tensor_args_ccol_indices_Tensor_row_indices_Tensor_values_Tensor_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__validate_sparse_bsc_tensor_args_ccol_indices_Tensor_row_indices_Tensor_values_Tensor_size_IntArrayRef, 4}, {"_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_options_TensorOptions", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_options_TensorOptions, 4}, - {"_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions, 6}, + {"_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions, 7}, {"_torch_cpp_torch_namespace__to_cpu_tensors_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__to_cpu_tensors_TensorList, 1}, - {"_torch_cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor, 2}, + {"_torch_cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor, 3}, {"_torch_cpp_torch_namespace__coalesce_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__coalesce_self_Tensor, 1}, {"_torch_cpp_torch_namespace_hspmm_out_out_Tensor_mat1_Tensor_mat2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_hspmm_out_out_Tensor_mat1_Tensor_mat2_Tensor, 3}, {"_torch_cpp_torch_namespace_hspmm_mat1_Tensor_mat2_Tensor", (DL_FUNC) 
&_torch_cpp_torch_namespace_hspmm_mat1_Tensor_mat2_Tensor, 2}, {"_torch_cpp_torch_namespace_copy_sparse_to_sparse__self_Tensor_src_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_copy_sparse_to_sparse__self_Tensor_src_Tensor, 3}, {"_torch_cpp_torch_namespace_unbind_self_Tensor_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_unbind_self_Tensor_dim_int64_t, 2}, {"_torch_cpp_torch_namespace_unbind_self_Tensor_dim_Dimname", (DL_FUNC) &_torch_cpp_torch_namespace_unbind_self_Tensor_dim_Dimname, 2}, + {"_torch_cpp_torch_namespace__to_sparse_semi_structured_dense_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__to_sparse_semi_structured_dense_Tensor, 1}, {"_torch_cpp_torch_namespace_mkldnn_reorder_conv2d_weight_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_mkldnn_reorder_conv2d_weight_self_Tensor, 6}, - {"_torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor, 5}, + {"_torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor, 6}, {"_torch_cpp_torch_namespace_to_mkldnn_backward_grad_Tensor_input_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_to_mkldnn_backward_grad_Tensor_input_Tensor, 2}, {"_torch_cpp_torch_namespace_quantize_per_tensor_dynamic_self_Tensor_dtype_ScalarType_reduce_range_bool", (DL_FUNC) &_torch_cpp_torch_namespace_quantize_per_tensor_dynamic_self_Tensor_dtype_ScalarType_reduce_range_bool, 3}, {"_torch_cpp_torch_namespace_quantize_per_tensor_self_Tensor_scale_double_zero_point_int64_t_dtype_ScalarType", (DL_FUNC) &_torch_cpp_torch_namespace_quantize_per_tensor_self_Tensor_scale_double_zero_point_int64_t_dtype_ScalarType, 4}, @@ -47568,7 +49692,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_result_type_other_Scalar_tensor_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_result_type_other_Scalar_tensor_Tensor, 2}, {"_torch_cpp_torch_namespace_result_type_scalar_Scalar_tensor_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_result_type_scalar_Scalar_tensor_Tensor, 2}, {"_torch_cpp_torch_namespace_result_type_scalar1_Scalar_scalar2_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_result_type_scalar1_Scalar_scalar2_Scalar, 2}, - {"_torch_cpp_torch_namespace_can_cast_from_ScalarType_to_ScalarType", (DL_FUNC) &_torch_cpp_torch_namespace_can_cast_from_ScalarType_to_ScalarType, 2}, + {"_torch_cpp_torch_namespace_can_cast_from__ScalarType_to_ScalarType", (DL_FUNC) &_torch_cpp_torch_namespace_can_cast_from__ScalarType_to_ScalarType, 2}, {"_torch_cpp_torch_namespace_promote_types_type1_ScalarType_type2_ScalarType", (DL_FUNC) &_torch_cpp_torch_namespace_promote_types_type1_ScalarType_type2_ScalarType, 2}, {"_torch_cpp_torch_namespace__local_scalar_dense_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__local_scalar_dense_self_Tensor, 1}, {"_torch_cpp_torch_namespace__lstm_mps_input_Tensor_hx_TensorList_params_TensorList_has_biases_bool_num_layers_int64_t_dropout_double_train_bool_bidirectional_bool_batch_first_bool", (DL_FUNC) &_torch_cpp_torch_namespace__lstm_mps_input_Tensor_hx_TensorList_params_TensorList_has_biases_bool_num_layers_int64_t_dropout_double_train_bool_bidirectional_bool_batch_first_bool, 9}, @@ -47605,6 +49729,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_masked_fill_self_Tensor_mask_Tensor_value_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_masked_fill_self_Tensor_mask_Tensor_value_Scalar, 3}, 
{"_torch_cpp_torch_namespace_masked_fill_self_Tensor_mask_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_masked_fill_self_Tensor_mask_Tensor_value_Tensor, 3}, {"_torch_cpp_torch_namespace_masked_scatter_self_Tensor_mask_Tensor_source_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_masked_scatter_self_Tensor_mask_Tensor_source_Tensor, 3}, + {"_torch_cpp_torch_namespace_masked_scatter_backward_grad_output_Tensor_mask_Tensor_sizes_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_masked_scatter_backward_grad_output_Tensor_mask_Tensor_sizes_IntArrayRef, 3}, {"_torch_cpp_torch_namespace__masked_softmax_self_Tensor_mask_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__masked_softmax_self_Tensor_mask_Tensor, 4}, {"_torch_cpp_torch_namespace__masked_softmax_backward_grad_output_Tensor_output_Tensor_mask_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__masked_softmax_backward_grad_output_Tensor_output_Tensor_mask_Tensor, 4}, {"_torch_cpp_torch_namespace_put_self_Tensor_index_Tensor_source_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_put_self_Tensor_index_Tensor_source_Tensor, 4}, @@ -47739,6 +49864,8 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_masked_select_backward_grad_Tensor_input_Tensor_mask_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_masked_select_backward_grad_Tensor_input_Tensor_mask_Tensor, 3}, {"_torch_cpp_torch_namespace_nonzero_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_nonzero_out_out_Tensor_self_Tensor, 2}, {"_torch_cpp_torch_namespace_nonzero_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_nonzero_self_Tensor, 1}, + {"_torch_cpp_torch_namespace_nonzero_static_out_out_Tensor_self_Tensor_size_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_nonzero_static_out_out_Tensor_self_Tensor_size_int64_t, 4}, + {"_torch_cpp_torch_namespace_nonzero_static_self_Tensor_size_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_nonzero_static_self_Tensor_size_int64_t, 3}, {"_torch_cpp_torch_namespace_nonzero_numpy_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_nonzero_numpy_self_Tensor, 1}, {"_torch_cpp_torch_namespace_argwhere_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_argwhere_self_Tensor, 1}, {"_torch_cpp_torch_namespace_gather_out_out_Tensor_self_Tensor_dim_int64_t_index_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_gather_out_out_Tensor_self_Tensor_dim_int64_t_index_Tensor, 5}, @@ -47837,6 +49964,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_remainder_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_remainder_self_Tensor_other_Tensor, 2}, {"_torch_cpp_torch_namespace_remainder_self_Scalar_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_remainder_self_Scalar_other_Tensor, 2}, {"_torch_cpp_torch_namespace_min_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_min_self_Tensor, 1}, + {"_torch_cpp_torch_namespace_min_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_min_out_out_Tensor_self_Tensor, 2}, {"_torch_cpp_torch_namespace_fmin_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_fmin_self_Tensor_other_Tensor, 2}, {"_torch_cpp_torch_namespace_fmin_out_out_Tensor_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_fmin_out_out_Tensor_self_Tensor_other_Tensor, 3}, {"_torch_cpp_torch_namespace_max_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_max_self_Tensor, 1}, @@ -47871,6 +49999,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_msort_self_Tensor", (DL_FUNC) 
&_torch_cpp_torch_namespace_msort_self_Tensor, 1}, {"_torch_cpp_torch_namespace_argsort_self_Tensor_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_argsort_self_Tensor_dim_int64_t, 3}, {"_torch_cpp_torch_namespace_argsort_self_Tensor_stable_bool_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_argsort_self_Tensor_stable_bool_dim_int64_t, 4}, + {"_torch_cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool", (DL_FUNC) &_torch_cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool, 5}, {"_torch_cpp_torch_namespace_argsort_self_Tensor_dim_Dimname", (DL_FUNC) &_torch_cpp_torch_namespace_argsort_self_Tensor_dim_Dimname, 3}, {"_torch_cpp_torch_namespace_topk_out_values_Tensor_indices_Tensor_self_Tensor_k_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_topk_out_values_Tensor_indices_Tensor_self_Tensor_k_int64_t, 7}, {"_torch_cpp_torch_namespace_topk_self_Tensor_k_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_topk_self_Tensor_k_int64_t, 5}, @@ -47904,57 +50033,70 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__amp_update_scale__self_Tensor_growth_tracker_Tensor_found_inf_Tensor_scale_growth_factor_double_scale_backoff_factor_double_growth_interval_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__amp_update_scale__self_Tensor_growth_tracker_Tensor_found_inf_Tensor_scale_growth_factor_double_scale_backoff_factor_double_growth_interval_int64_t, 6}, {"_torch_cpp_torch_namespace__foreach_add_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add_self_TensorList_scalar_Scalar, 2}, {"_torch_cpp_torch_namespace__foreach_add__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add__self_TensorList_scalar_Scalar, 2}, - {"_torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar, 2}, - {"_torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar, 2}, - {"_torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar, 2}, - {"_torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar, 2}, - {"_torch_cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar, 2}, - {"_torch_cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar, 2}, - {"_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar, 2}, - {"_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar, 2}, - {"_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar, 2}, - {"_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar, 2}, - {"_torch_cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar", (DL_FUNC) 
&_torch_cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar, 2}, - {"_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar, 2}, - {"_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar, 2}, - {"_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar, 2}, {"_torch_cpp_torch_namespace__foreach_add_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add_self_TensorList_other_TensorList, 3}, {"_torch_cpp_torch_namespace__foreach_add__self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add__self_TensorList_other_TensorList, 3}, + {"_torch_cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar, 2}, + {"_torch_cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar, 2}, + {"_torch_cpp_torch_namespace__foreach_add_self_TensorList_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add_self_TensorList_other_Tensor, 3}, + {"_torch_cpp_torch_namespace__foreach_add__self_TensorList_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add__self_TensorList_other_Tensor, 3}, + {"_torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar, 2}, + {"_torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar, 2}, {"_torch_cpp_torch_namespace__foreach_sub_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub_self_TensorList_other_TensorList, 3}, {"_torch_cpp_torch_namespace__foreach_sub__self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub__self_TensorList_other_TensorList, 3}, + {"_torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar, 2}, + {"_torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar, 2}, + {"_torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar, 2}, + {"_torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar, 2}, {"_torch_cpp_torch_namespace__foreach_mul_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul_self_TensorList_other_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_mul__self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul__self_TensorList_other_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar, 2}, + 
{"_torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar, 2}, + {"_torch_cpp_torch_namespace__foreach_mul_self_TensorList_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul_self_TensorList_other_Tensor, 2}, + {"_torch_cpp_torch_namespace__foreach_mul__self_TensorList_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul__self_TensorList_other_Tensor, 2}, + {"_torch_cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar, 2}, + {"_torch_cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar, 2}, {"_torch_cpp_torch_namespace__foreach_div_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div_self_TensorList_other_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_div__self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div__self_TensorList_other_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar, 2}, - {"_torch_cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar, 2}, - {"_torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar, 2}, - {"_torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar, 2}, {"_torch_cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar, 2}, 
{"_torch_cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar, 2}, - {"_torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar, 2}, - {"_torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar, 2}, - {"_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar, 2}, - {"_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar, 2}, + {"_torch_cpp_torch_namespace__foreach_div_self_TensorList_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div_self_TensorList_other_Tensor, 2}, + {"_torch_cpp_torch_namespace__foreach_div__self_TensorList_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div__self_TensorList_other_Tensor, 2}, + {"_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar, 2}, + {"_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar, 2}, + {"_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar, 2}, {"_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar, 2}, + {"_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar, 2}, + {"_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar, 2}, + {"_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar, 2}, + {"_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar, 2}, + {"_torch_cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar", (DL_FUNC) 
&_torch_cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar, 2}, + {"_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar, 2}, + {"_torch_cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_maximum_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_maximum_self_TensorList_scalars_ArrayRefScalar, 2}, {"_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_maximum__self_TensorList_scalars_ArrayRefScalar, 2}, + {"_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar, 2}, + {"_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar, 2}, + {"_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum_self_TensorList_scalars_ArrayRefScalar, 2}, {"_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum__self_TensorList_scalars_ArrayRefScalar, 2}, - {"_torch_cpp_torch_namespace__foreach_exp_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_exp_self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_zero__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_zero__self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_exp__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_exp__self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_sqrt_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sqrt_self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_sqrt__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sqrt__self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList, 4}, + {"_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar, 4}, + {"_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor, 4}, + 
{"_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList, 4}, + {"_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar, 4}, + {"_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor, 4}, + {"_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList, 4}, + {"_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar, 4}, + {"_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor, 4}, + {"_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList, 4}, + {"_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar, 4}, + {"_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor, 4}, {"_torch_cpp_torch_namespace__foreach_abs_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_abs_self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_abs__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_abs__self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_acos_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_acos_self_TensorList, 1}, @@ -47973,10 +50115,20 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__foreach_erf__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_erf__self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_erfc_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_erfc_self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_erfc__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_erfc__self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_exp_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_exp_self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_exp__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_exp__self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_expm1_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_expm1_self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_expm1__self_TensorList", 
(DL_FUNC) &_torch_cpp_torch_namespace__foreach_expm1__self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_floor_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_floor_self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_floor__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_floor__self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_frac_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_frac_self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_frac__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_frac__self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList, 3}, + {"_torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList, 3}, + {"_torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar, 3}, + {"_torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar, 3}, + {"_torch_cpp_torch_namespace__foreach_lgamma_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lgamma_self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_lgamma__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lgamma__self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_log_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_log_self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_log__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_log__self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_log10_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_log10_self_TensorList, 1}, @@ -47985,51 +50137,47 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__foreach_log1p__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_log1p__self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_log2_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_log2_self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_log2__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_log2__self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_max_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_max_self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_neg_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_neg_self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_neg__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_neg__self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_tan_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_tan_self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_tan__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_tan__self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_tanh_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_tanh_self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_tanh__self_TensorList", (DL_FUNC) 
&_torch_cpp_torch_namespace__foreach_tanh__self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_sin_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sin_self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_sin__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sin__self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_sinh_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sinh_self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_sinh__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sinh__self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_round_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_round_self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_round__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_round__self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_lgamma_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lgamma_self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_lgamma__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lgamma__self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_frac_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_frac_self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_frac__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_frac__self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_norm_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_norm_self_TensorList, 3}, + {"_torch_cpp_torch_namespace__foreach_pow_self_TensorList_exponent_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_pow_self_TensorList_exponent_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_pow_self_TensorList_exponent_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_pow_self_TensorList_exponent_Scalar, 2}, + {"_torch_cpp_torch_namespace__foreach_pow_self_TensorList_exponent_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_pow_self_TensorList_exponent_ArrayRefScalar, 2}, + {"_torch_cpp_torch_namespace__foreach_pow_self_Scalar_exponent_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_pow_self_Scalar_exponent_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_pow__self_TensorList_exponent_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_pow__self_TensorList_exponent_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_pow__self_TensorList_exponent_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_pow__self_TensorList_exponent_Scalar, 2}, + {"_torch_cpp_torch_namespace__foreach_pow__self_TensorList_exponent_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_pow__self_TensorList_exponent_ArrayRefScalar, 2}, {"_torch_cpp_torch_namespace__foreach_reciprocal_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_reciprocal_self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_reciprocal__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_reciprocal__self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_round_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_round_self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_round__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_round__self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_sigmoid_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sigmoid_self_TensorList, 1}, 
{"_torch_cpp_torch_namespace__foreach_sigmoid__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sigmoid__self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_sign_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sign_self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_sign__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sign__self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_sin_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sin_self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_sin__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sin__self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_sinh_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sinh_self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_sinh__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sinh__self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_sqrt_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sqrt_self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_sqrt__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sqrt__self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_tan_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_tan_self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_tan__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_tan__self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_tanh_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_tanh_self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_tanh__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_tanh__self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_trunc_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_trunc_self_TensorList, 1}, {"_torch_cpp_torch_namespace__foreach_trunc__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_trunc__self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList, 4}, - {"_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList, 4}, - {"_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar, 4}, - {"_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor, 4}, - {"_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar, 4}, - {"_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor, 4}, - 
{"_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList, 4}, - {"_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList, 4}, - {"_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar, 4}, - {"_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor, 4}, - {"_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar, 4}, - {"_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor, 4}, - {"_torch_cpp_torch_namespace__foreach_norm_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_norm_self_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList, 3}, - {"_torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList, 3}, - {"_torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar, 3}, - {"_torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar, 3}, + {"_torch_cpp_torch_namespace__foreach_zero__self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_zero__self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_copy__self_TensorList_src_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_copy__self_TensorList_src_TensorList, 3}, + {"_torch_cpp_torch_namespace__foreach_copy_self_TensorList_src_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_copy_self_TensorList_src_TensorList, 3}, {"_torch_cpp_torch_namespace_bucketize_self_Tensor_boundaries_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_bucketize_self_Tensor_boundaries_Tensor, 4}, {"_torch_cpp_torch_namespace_bucketize_out_out_Tensor_self_Tensor_boundaries_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_bucketize_out_out_Tensor_self_Tensor_boundaries_Tensor, 5}, {"_torch_cpp_torch_namespace_bucketize_self_Scalar_boundaries_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_bucketize_self_Scalar_boundaries_Tensor, 4}, {"_torch_cpp_torch_namespace_searchsorted_sorted_sequence_Tensor_self_Tensor", (DL_FUNC) 
&_torch_cpp_torch_namespace_searchsorted_sorted_sequence_Tensor_self_Tensor, 6}, {"_torch_cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Tensor, 7}, {"_torch_cpp_torch_namespace_searchsorted_sorted_sequence_Tensor_self_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_searchsorted_sorted_sequence_Tensor_self_Scalar, 6}, + {"_torch_cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar, 7}, {"_torch_cpp_torch_namespace__convert_indices_from_coo_to_csr_self_Tensor_size_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__convert_indices_from_coo_to_csr_self_Tensor_size_int64_t, 3}, {"_torch_cpp_torch_namespace__convert_indices_from_coo_to_csr_out_out_Tensor_self_Tensor_size_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__convert_indices_from_coo_to_csr_out_out_Tensor_self_Tensor_size_int64_t, 4}, {"_torch_cpp_torch_namespace__convert_indices_from_csr_to_coo_crow_indices_Tensor_col_indices_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__convert_indices_from_csr_to_coo_crow_indices_Tensor_col_indices_Tensor, 4}, @@ -48452,6 +50600,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_logdet_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_logdet_self_Tensor, 1}, {"_torch_cpp_torch_namespace_linalg_eig_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_linalg_eig_self_Tensor, 1}, {"_torch_cpp_torch_namespace_linalg_eig_out_eigenvalues_Tensor_eigenvectors_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_linalg_eig_out_eigenvalues_Tensor_eigenvectors_Tensor_self_Tensor, 3}, + {"_torch_cpp_torch_namespace__linalg_eigvals_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__linalg_eigvals_self_Tensor, 1}, {"_torch_cpp_torch_namespace_linalg_eigvals_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_linalg_eigvals_self_Tensor, 1}, {"_torch_cpp_torch_namespace_linalg_eigvals_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_linalg_eigvals_out_out_Tensor_self_Tensor, 2}, {"_torch_cpp_torch_namespace__linalg_eigh_A_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__linalg_eigh_A_Tensor, 3}, @@ -48507,6 +50656,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_linalg_solve_ex_A_Tensor_B_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_linalg_solve_ex_A_Tensor_B_Tensor, 4}, {"_torch_cpp_torch_namespace_linalg_solve_ex_out_result_Tensor_info_Tensor_A_Tensor_B_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_linalg_solve_ex_out_result_Tensor_info_Tensor_A_Tensor_B_Tensor, 6}, {"_torch_cpp_torch_namespace_linalg_solve_A_Tensor_B_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_linalg_solve_A_Tensor_B_Tensor, 3}, + {"_torch_cpp_torch_namespace__spsolve_A_Tensor_B_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__spsolve_A_Tensor_B_Tensor, 3}, {"_torch_cpp_torch_namespace_linalg_solve_out_out_Tensor_A_Tensor_B_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_linalg_solve_out_out_Tensor_A_Tensor_B_Tensor, 4}, {"_torch_cpp_torch_namespace_linalg_tensorinv_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_linalg_tensorinv_self_Tensor, 2}, {"_torch_cpp_torch_namespace_linalg_tensorinv_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_linalg_tensorinv_out_out_Tensor_self_Tensor, 3}, @@ -48528,6 +50678,7 @@ static const R_CallMethodDef CallEntries[] = { 
{"_torch_cpp_torch_namespace_linalg_multi_dot_out_out_Tensor_tensors_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace_linalg_multi_dot_out_out_Tensor_tensors_TensorList, 2}, {"_torch_cpp_torch_namespace_nested_to_padded_tensor_self_Tensor_padding_double", (DL_FUNC) &_torch_cpp_torch_namespace_nested_to_padded_tensor_self_Tensor_padding_double, 3}, {"_torch_cpp_torch_namespace__test_serialization_subcmul_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__test_serialization_subcmul_self_Tensor_other_Tensor, 3}, + {"_torch_cpp_torch_namespace__test_parallel_materialize_self_Tensor_num_parallel_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__test_parallel_materialize_self_Tensor_num_parallel_int64_t, 3}, {"_torch_cpp_torch_namespace__test_optional_intlist_values_Tensor_addends_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__test_optional_intlist_values_Tensor_addends_IntArrayRef, 2}, {"_torch_cpp_torch_namespace__test_optional_filled_intlist_values_Tensor_addends_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__test_optional_filled_intlist_values_Tensor_addends_IntArrayRef, 2}, {"_torch_cpp_torch_namespace__test_optional_floatlist_values_Tensor_addends_ArrayRefdouble", (DL_FUNC) &_torch_cpp_torch_namespace__test_optional_floatlist_values_Tensor_addends_ArrayRefdouble, 2}, @@ -48538,7 +50689,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__test_autograd_multiple_dispatch_view_copy_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__test_autograd_multiple_dispatch_view_copy_self_Tensor, 1}, {"_torch_cpp_torch_namespace_segment_reduce_data_Tensor_reduce_c10string_view", (DL_FUNC) &_torch_cpp_torch_namespace_segment_reduce_data_Tensor_reduce_c10string_view, 8}, {"_torch_cpp_torch_namespace__segment_reduce_backward_grad_Tensor_output_Tensor_data_Tensor_reduce_c10string_view", (DL_FUNC) &_torch_cpp_torch_namespace__segment_reduce_backward_grad_Tensor_output_Tensor_data_Tensor_reduce_c10string_view, 8}, - {"_torch_cpp_torch_namespace_pad_sequence_sequences_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace_pad_sequence_sequences_TensorList, 3}, + {"_torch_cpp_torch_namespace_pad_sequence_sequences_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace_pad_sequence_sequences_TensorList, 4}, {"_torch_cpp_torch_namespace_flatten_dense_tensors_tensors_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace_flatten_dense_tensors_tensors_TensorList, 1}, {"_torch_cpp_torch_namespace_unflatten_dense_tensors_flat_Tensor_tensors_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace_unflatten_dense_tensors_flat_Tensor_tensors_TensorList, 2}, {"_torch_cpp_torch_namespace__nested_tensor_from_tensor_list_list_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__nested_tensor_from_tensor_list_list_TensorList, 5}, @@ -48581,28 +50732,35 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_view_copy_self_Tensor_dtype_ScalarType", (DL_FUNC) &_torch_cpp_torch_namespace_view_copy_self_Tensor_dtype_ScalarType, 2}, {"_torch_cpp_torch_namespace_unfold_copy_self_Tensor_dimension_int64_t_size_int64_t_step_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_unfold_copy_self_Tensor_dimension_int64_t_size_int64_t_step_int64_t, 4}, {"_torch_cpp_torch_namespace_alias_copy_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_alias_copy_self_Tensor, 1}, + {"_torch_cpp_torch_namespace__jagged_to_padded_dense_forward_values_Tensor_offsets_TensorList_max_lengths_IntArrayRef", (DL_FUNC) 
&_torch_cpp_torch_namespace__jagged_to_padded_dense_forward_values_Tensor_offsets_TensorList_max_lengths_IntArrayRef, 4}, + {"_torch_cpp_torch_namespace__padded_dense_to_jagged_forward_dense_Tensor_offsets_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__padded_dense_to_jagged_forward_dense_Tensor_offsets_TensorList, 3}, {"_torch_cpp_torch_namespace__nested_tensor_softmax_with_shape_self_Tensor_query_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_tensor_softmax_with_shape_self_Tensor_query_Tensor, 2}, + {"_torch_cpp_torch_namespace__safe_softmax_self_Tensor_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__safe_softmax_self_Tensor_dim_int64_t, 3}, {"_torch_cpp_torch_namespace__transformer_encoder_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__transformer_encoder_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor, 20}, {"_torch_cpp_torch_namespace__native_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__native_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor, 13}, - {"_torch_cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor, 6}, - {"_torch_cpp_torch_namespace__scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor, 7}, - {"_torch_cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor, 6}, - {"_torch_cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor, 7}, - {"_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor, 6}, - {"_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t, 14}, - 
{"_torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_compute_log_sumexp_bool", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_compute_log_sumexp_bool, 5}, - {"_torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor, 8}, - {"_torch_cpp_torch_namespace__chunk_grad_outputs_efficient_attention_query_Tensor_key_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__chunk_grad_outputs_efficient_attention_query_Tensor_key_Tensor_value_Tensor, 4}, - {"_torch_cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool", (DL_FUNC) &_torch_cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool, 10}, - {"_torch_cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t, 14}, - {"_torch_cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t, 8}, - {"_torch_cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor, 8}, + {"_torch_cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor, 8}, + {"_torch_cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor, 8}, + {"_torch_cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor, 9}, + {"_torch_cpp_torch_namespace__scaled_dot_product_attention_math_for_mps_query_Tensor_key_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_attention_math_for_mps_query_Tensor_key_Tensor_value_Tensor, 8}, + {"_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor, 7}, + 
{"_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_query_Tensor_key_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_query_Tensor_key_Tensor_value_Tensor, 7}, + {"_torch_cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_query_Tensor_key_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_query_Tensor_key_Tensor_value_Tensor, 8}, + {"_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor, 15}, + {"_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_dropout_p_double_is_causal_bool", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_dropout_p_double_is_causal_bool, 10}, + {"_torch_cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_grad_input_mask_stdarraybool4_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_grad_input_mask_stdarraybool4_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor, 17}, + {"_torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool, 8}, + {"_torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_dropout_p_double_grad_input_mask_stdarraybool4", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_dropout_p_double_grad_input_mask_stdarraybool4, 13}, + {"_torch_cpp_torch_namespace__scaled_dot_product_cudnn_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_cudnn_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool, 9}, + 
{"_torch_cpp_torch_namespace__scaled_dot_product_cudnn_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_attn_bias_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool", (DL_FUNC) &_torch_cpp_torch_namespace__scaled_dot_product_cudnn_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_attn_bias_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool, 16}, + {"_torch_cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool", (DL_FUNC) &_torch_cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool, 15}, + {"_torch_cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor, 17}, + {"_torch_cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_dropout_p_double_custom_mask_type_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_dropout_p_double_custom_mask_type_int64_t, 14}, + {"_torch_cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_out_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_logsumexp_Tensor_dropout_p_double_philox_seed_Tensor_philox_offset_Tensor_custom_mask_type_int64_t_bias_requires_grad_bool", (DL_FUNC) &_torch_cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_out_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_logsumexp_Tensor_dropout_p_double_philox_seed_Tensor_philox_offset_Tensor_custom_mask_type_int64_t_bias_requires_grad_bool, 20}, {"_torch_cpp_torch_namespace__triton_scaled_dot_attention_q_Tensor_k_Tensor_v_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__triton_scaled_dot_attention_q_Tensor_k_Tensor_v_Tensor, 4}, + {"_torch_cpp_torch_namespace__fill_mem_eff_dropout_mask__self_Tensor_dropout_p_double_seed_int64_t_offset_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__fill_mem_eff_dropout_mask__self_Tensor_dropout_p_double_seed_int64_t_offset_int64_t, 4}, {"_torch_cpp_torch_namespace__triton_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor", (DL_FUNC) 
&_torch_cpp_torch_namespace__triton_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor, 10}, {"_torch_cpp_torch_namespace_special_airy_ai_x_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_special_airy_ai_x_Tensor, 1}, {"_torch_cpp_torch_namespace_special_airy_ai_out_out_Tensor_x_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_special_airy_ai_out_out_Tensor_x_Tensor, 2}, - {"_torch_cpp_torch_namespace__transformer_decoder_only_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__transformer_decoder_only_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor, 21}, - {"_torch_cpp_torch_namespace__native_decoder_only_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__native_decoder_only_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor, 14}, {"_torch_cpp_torch_namespace_special_bessel_j0_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_special_bessel_j0_self_Tensor, 1}, {"_torch_cpp_torch_namespace_special_bessel_j0_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_special_bessel_j0_out_out_Tensor_self_Tensor, 2}, {"_torch_cpp_torch_namespace_special_bessel_j1_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_special_bessel_j1_self_Tensor, 1}, @@ -48699,7 +50857,13 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_special_spherical_bessel_j0_out_out_Tensor_x_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_special_spherical_bessel_j0_out_out_Tensor_x_Tensor, 2}, {"_torch_cpp_torch_namespace__foobar_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foobar_self_Tensor, 4}, {"_torch_cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool, 15}, + {"_torch_cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool", (DL_FUNC) 
&_torch_cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool, 15}, {"_torch_cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool, 15}, + {"_torch_cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool, 15}, + {"_torch_cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool, 12}, + {"_torch_cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool, 12}, + {"_torch_cpp_torch_namespace__fused_adagrad__self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_adagrad__self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool, 11}, + {"_torch_cpp_torch_namespace__propagate_xla_data_input_Tensor_output_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__propagate_xla_data_input_Tensor_output_Tensor, 2}, {"_torch_cpp_torch_namespace__new_zeros_with_same_feature_meta_out_out_Tensor_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__new_zeros_with_same_feature_meta_out_out_Tensor_self_Tensor_other_Tensor, 4}, {"_torch_cpp_torch_namespace__cudnn_ctc_loss_out_out0_Tensor_out1_Tensor_log_probs_Tensor_targets_Tensor_input_lengths_IntArrayRef_target_lengths_IntArrayRef_blank_int64_t_deterministic_bool_zero_infinity_bool", (DL_FUNC) &_torch_cpp_torch_namespace__cudnn_ctc_loss_out_out0_Tensor_out1_Tensor_log_probs_Tensor_targets_Tensor_input_lengths_IntArrayRef_target_lengths_IntArrayRef_blank_int64_t_deterministic_bool_zero_infinity_bool, 9}, 
{"_torch_cpp_torch_namespace__cudnn_rnn_flatten_weight_out_out_Tensor_weight_arr_TensorList_weight_stride0_int64_t_input_size_int64_t_mode_int64_t_hidden_size_int64_t_proj_size_int64_t_num_layers_int64_t_batch_first_bool_bidirectional_bool", (DL_FUNC) &_torch_cpp_torch_namespace__cudnn_rnn_flatten_weight_out_out_Tensor_weight_arr_TensorList_weight_stride0_int64_t_input_size_int64_t_mode_int64_t_hidden_size_int64_t_proj_size_int64_t_num_layers_int64_t_batch_first_bool_bidirectional_bool, 10}, @@ -48714,6 +50878,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__add_relu_out_out_Tensor_self_Tensor_other_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__add_relu_out_out_Tensor_self_Tensor_other_Scalar, 4}, {"_torch_cpp_torch_namespace_add_out_out_Tensor_self_Tensor_other_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_add_out_out_Tensor_self_Tensor_other_Scalar, 4}, {"_torch_cpp_torch_namespace_affine_grid_generator_out_out_Tensor_theta_Tensor_size_IntArrayRef_align_corners_bool", (DL_FUNC) &_torch_cpp_torch_namespace_affine_grid_generator_out_out_Tensor_theta_Tensor_size_IntArrayRef_align_corners_bool, 4}, + {"_torch_cpp_torch_namespace__test_functorch_fallback_out_out_Tensor_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__test_functorch_fallback_out_out_Tensor_self_Tensor_other_Tensor, 3}, {"_torch_cpp_torch_namespace_bartlett_window_out_out_Tensor_window_length_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_bartlett_window_out_out_Tensor_window_length_int64_t, 2}, {"_torch_cpp_torch_namespace_bartlett_window_out_out_Tensor_window_length_int64_t_periodic_bool", (DL_FUNC) &_torch_cpp_torch_namespace_bartlett_window_out_out_Tensor_window_length_int64_t_periodic_bool, 3}, {"_torch_cpp_torch_namespace_quantized_batch_norm_out_out_Tensor_input_Tensor_weight_Tensor_bias_Tensor_mean_Tensor_var_Tensor_eps_double_output_scale_double_output_zero_point_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_quantized_batch_norm_out_out_Tensor_input_Tensor_weight_Tensor_bias_Tensor_mean_Tensor_var_Tensor_eps_double_output_scale_double_output_zero_point_int64_t, 9}, @@ -48741,7 +50906,6 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_cudnn_affine_grid_generator_backward_out_out_Tensor_grad_Tensor_FALSE_int64_t_C_int64_t_H_int64_t_W_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_cudnn_affine_grid_generator_backward_out_out_Tensor_grad_Tensor_FALSE_int64_t_C_int64_t_H_int64_t_W_int64_t, 6}, {"_torch_cpp_torch_namespace_cudnn_batch_norm_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_exponential_average_factor_double_epsilon_double", (DL_FUNC) &_torch_cpp_torch_namespace_cudnn_batch_norm_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_exponential_average_factor_double_epsilon_double, 12}, {"_torch_cpp_torch_namespace_cudnn_batch_norm_backward_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_grad_output_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_var_Tensor_epsilon_double_reserveSpace_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_cudnn_batch_norm_backward_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_grad_output_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_var_Tensor_epsilon_double_reserveSpace_Tensor, 12}, - 
{"_torch_cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool", (DL_FUNC) &_torch_cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool, 10}, {"_torch_cpp_torch_namespace_cudnn_convolution_transpose_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool", (DL_FUNC) &_torch_cpp_torch_namespace_cudnn_convolution_transpose_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool, 11}, {"_torch_cpp_torch_namespace__mps_convolution_transpose_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__mps_convolution_transpose_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t, 8}, {"_torch_cpp_torch_namespace_mps_convolution_transpose_backward_out_out0_Tensor_out1_Tensor_self_Tensor_grad_output_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_output_mask_stdarraybool2", (DL_FUNC) &_torch_cpp_torch_namespace_mps_convolution_transpose_backward_out_out0_Tensor_out1_Tensor_self_Tensor_grad_output_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_output_mask_stdarraybool2, 11}, @@ -48765,6 +50929,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__embedding_bag_dense_backward_out_out_Tensor_grad_Tensor_indices_Tensor_offset2bag_Tensor_bag_size_Tensor_maximum_indices_Tensor_num_weights_int64_t_scale_grad_by_freq_bool_mode_int64_t_per_sample_weights_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__embedding_bag_dense_backward_out_out_Tensor_grad_Tensor_indices_Tensor_offset2bag_Tensor_bag_size_Tensor_maximum_indices_Tensor_num_weights_int64_t_scale_grad_by_freq_bool_mode_int64_t_per_sample_weights_Tensor, 11}, {"_torch_cpp_torch_namespace__embedding_bag_per_sample_weights_backward_out_out_Tensor_grad_Tensor_weight_Tensor_indices_Tensor_offsets_Tensor_offset2bag_Tensor_mode_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__embedding_bag_per_sample_weights_backward_out_out_Tensor_grad_Tensor_weight_Tensor_indices_Tensor_offsets_Tensor_offset2bag_Tensor_mode_int64_t, 8}, {"_torch_cpp_torch_namespace_empty_out_out_Tensor_size_IntArrayRef_names_DimnameList", (DL_FUNC) &_torch_cpp_torch_namespace_empty_out_out_Tensor_size_IntArrayRef_names_DimnameList, 4}, + {"_torch_cpp_torch_namespace_empty_permuted_out_out_Tensor_size_IntArrayRef_physical_layout_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_empty_permuted_out_out_Tensor_size_IntArrayRef_physical_layout_IntArrayRef, 3}, {"_torch_cpp_torch_namespace_new_empty_out_out_Tensor_self_Tensor_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_new_empty_out_out_Tensor_self_Tensor_size_IntArrayRef, 3}, 
{"_torch_cpp_torch_namespace_new_empty_strided_out_out_Tensor_self_Tensor_size_IntArrayRef_stride_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_new_empty_strided_out_out_Tensor_self_Tensor_size_IntArrayRef_stride_IntArrayRef, 4}, {"_torch_cpp_torch_namespace_new_full_out_out_Tensor_self_Tensor_size_IntArrayRef_fill_value_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_new_full_out_out_Tensor_self_Tensor_size_IntArrayRef_fill_value_Scalar, 4}, @@ -48781,6 +50946,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_empty_strided_out_out_Tensor_size_IntArrayRef_stride_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_empty_strided_out_out_Tensor_size_IntArrayRef_stride_IntArrayRef, 3}, {"_torch_cpp_torch_namespace_fill_out_out_Tensor_self_Tensor_value_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_fill_out_out_Tensor_self_Tensor_value_Scalar, 3}, {"_torch_cpp_torch_namespace_fill_out_out_Tensor_self_Tensor_value_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_fill_out_out_Tensor_self_Tensor_value_Tensor, 3}, + {"_torch_cpp_torch_namespace_floor_divide_out_out_Tensor_self_Tensor_other_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_floor_divide_out_out_Tensor_self_Tensor_other_Scalar, 3}, {"_torch_cpp_torch_namespace_full_out_out_Tensor_size_IntArrayRef_fill_value_Scalar_names_DimnameList", (DL_FUNC) &_torch_cpp_torch_namespace_full_out_out_Tensor_size_IntArrayRef_fill_value_Scalar_names_DimnameList, 4}, {"_torch_cpp_torch_namespace_full_like_out_out_Tensor_self_Tensor_fill_value_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_full_like_out_out_Tensor_self_Tensor_fill_value_Scalar, 4}, {"_torch_cpp_torch_namespace_from_file_out_out_Tensor_filename_c10string_view", (DL_FUNC) &_torch_cpp_torch_namespace_from_file_out_out_Tensor_filename_c10string_view, 4}, @@ -48800,9 +50966,9 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_kaiser_window_out_out_Tensor_window_length_int64_t_periodic_bool_beta_double", (DL_FUNC) &_torch_cpp_torch_namespace_kaiser_window_out_out_Tensor_window_length_int64_t_periodic_bool_beta_double, 4}, {"_torch_cpp_torch_namespace_native_group_norm_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_weight_Tensor_bias_Tensor_FALSE_int64_t_C_int64_t_HxW_int64_t_group_int64_t_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace_native_group_norm_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_weight_Tensor_bias_Tensor_FALSE_int64_t_C_int64_t_HxW_int64_t_group_int64_t_eps_double, 11}, {"_torch_cpp_torch_namespace_native_group_norm_backward_out_out0_Tensor_out1_Tensor_out2_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_rstd_Tensor_weight_Tensor_FALSE_int64_t_C_int64_t_HxW_int64_t_group_int64_t_output_mask_stdarraybool3", (DL_FUNC) &_torch_cpp_torch_namespace_native_group_norm_backward_out_out0_Tensor_out1_Tensor_out2_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_rstd_Tensor_weight_Tensor_FALSE_int64_t_C_int64_t_HxW_int64_t_group_int64_t_output_mask_stdarraybool3, 13}, - {"_torch_cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor, 5}, - {"_torch_cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor, 6}, - 
{"_torch_cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor, 5}, + {"_torch_cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor, 5}, + {"_torch_cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor, 6}, + {"_torch_cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor, 5}, {"_torch_cpp_torch_namespace_isnan_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_isnan_out_out_Tensor_self_Tensor, 2}, {"_torch_cpp_torch_namespace_native_layer_norm_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_normalized_shape_IntArrayRef_weight_Tensor_bias_Tensor_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace_native_layer_norm_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_normalized_shape_IntArrayRef_weight_Tensor_bias_Tensor_eps_double, 8}, {"_torch_cpp_torch_namespace_native_layer_norm_backward_out_out0_Tensor_out1_Tensor_out2_Tensor_grad_out_Tensor_input_Tensor_normalized_shape_IntArrayRef_mean_Tensor_rstd_Tensor_weight_Tensor_bias_Tensor_output_mask_stdarraybool3", (DL_FUNC) &_torch_cpp_torch_namespace_native_layer_norm_backward_out_out0_Tensor_out1_Tensor_out2_Tensor_grad_out_Tensor_input_Tensor_normalized_shape_IntArrayRef_mean_Tensor_rstd_Tensor_weight_Tensor_bias_Tensor_output_mask_stdarraybool3, 11}, @@ -48821,6 +50987,7 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_mkldnn_max_pool3d_backward_out_out_Tensor_grad_output_Tensor_output_Tensor_input_Tensor_kernel_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_mkldnn_max_pool3d_backward_out_out_Tensor_grad_output_Tensor_output_Tensor_input_Tensor_kernel_size_IntArrayRef, 9}, {"_torch_cpp_torch_namespace_quantized_max_pool1d_out_out_Tensor_self_Tensor_kernel_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_quantized_max_pool1d_out_out_Tensor_self_Tensor_kernel_size_IntArrayRef, 7}, {"_torch_cpp_torch_namespace_quantized_max_pool2d_out_out_Tensor_self_Tensor_kernel_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_quantized_max_pool2d_out_out_Tensor_self_Tensor_kernel_size_IntArrayRef, 7}, + {"_torch_cpp_torch_namespace_quantized_max_pool3d_out_out_Tensor_self_Tensor_kernel_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_quantized_max_pool3d_out_out_Tensor_self_Tensor_kernel_size_IntArrayRef, 7}, {"_torch_cpp_torch_namespace_median_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_median_out_out_Tensor_self_Tensor, 2}, {"_torch_cpp_torch_namespace_nanmedian_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_nanmedian_out_out_Tensor_self_Tensor, 2}, {"_torch_cpp_torch_namespace__mps_convolution_out_out_Tensor_self_Tensor_weight_Tensor_bias_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t", (DL_FUNC) 
&_torch_cpp_torch_namespace__mps_convolution_out_out_Tensor_self_Tensor_weight_Tensor_bias_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t, 8}, @@ -48838,12 +51005,13 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__sparse_sparse_matmul_out_out_Tensor_self_Tensor_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_sparse_matmul_out_out_Tensor_self_Tensor_other_Tensor, 3}, {"_torch_cpp_torch_namespace_mul_out_out_Tensor_self_Tensor_other_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_mul_out_out_Tensor_self_Tensor_other_Scalar, 3}, {"_torch_cpp_torch_namespace__native_batch_norm_legit_functional_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace__native_batch_norm_legit_functional_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double, 8}, + {"_torch_cpp_torch_namespace__native_batch_norm_legit_no_training_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace__native_batch_norm_legit_no_training_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double, 10}, {"_torch_cpp_torch_namespace_batch_norm_stats_out_out0_Tensor_out1_Tensor_input_Tensor_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace_batch_norm_stats_out_out0_Tensor_out1_Tensor_input_Tensor_eps_double, 4}, {"_torch_cpp_torch_namespace_batch_norm_gather_stats_out_out0_Tensor_out1_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double_count_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_batch_norm_gather_stats_out_out0_Tensor_out1_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double_count_int64_t, 10}, {"_torch_cpp_torch_namespace_batch_norm_gather_stats_with_counts_out_out0_Tensor_out1_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double_counts_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_batch_norm_gather_stats_with_counts_out_out0_Tensor_out1_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double_counts_Tensor, 10}, {"_torch_cpp_torch_namespace_native_batch_norm_backward_out_out0_Tensor_out1_Tensor_out2_Tensor_grad_out_Tensor_input_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_invstd_Tensor_train_bool_eps_double_output_mask_stdarraybool3", (DL_FUNC) &_torch_cpp_torch_namespace_native_batch_norm_backward_out_out0_Tensor_out1_Tensor_out2_Tensor_grad_out_Tensor_input_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_invstd_Tensor_train_bool_eps_double_output_mask_stdarraybool3, 13}, {"_torch_cpp_torch_namespace_batch_norm_backward_reduce_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_input_g_bool_weight_g_bool_bias_g_bool", (DL_FUNC) &_torch_cpp_torch_namespace_batch_norm_backward_reduce_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_input_g_bool_weight_g_bool_bias_g_bool, 12}, - 
{"_torch_cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor, 9}, + {"_torch_cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor, 9}, {"_torch_cpp_torch_namespace_batch_norm_update_stats_out_out0_Tensor_out1_Tensor_input_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double", (DL_FUNC) &_torch_cpp_torch_namespace_batch_norm_update_stats_out_out0_Tensor_out1_Tensor_input_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double, 6}, {"_torch_cpp_torch_namespace__nnpack_spatial_convolution_out_out_Tensor_input_Tensor_weight_Tensor_bias_Tensor_padding_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__nnpack_spatial_convolution_out_out_Tensor_input_Tensor_weight_Tensor_bias_Tensor_padding_IntArrayRef, 6}, {"_torch_cpp_torch_namespace_ones_out_out_Tensor_size_IntArrayRef_names_DimnameList", (DL_FUNC) &_torch_cpp_torch_namespace_ones_out_out_Tensor_size_IntArrayRef_names_DimnameList, 3}, @@ -48890,8 +51058,11 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__nested_from_padded_out_out_Tensor_padded_Tensor_cpu_nested_shape_example_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_from_padded_out_out_Tensor_padded_Tensor_cpu_nested_shape_example_Tensor, 4}, {"_torch_cpp_torch_namespace__nested_tensor_size_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_tensor_size_out_out_Tensor_self_Tensor, 2}, {"_torch_cpp_torch_namespace__nested_tensor_strides_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_tensor_strides_out_out_Tensor_self_Tensor, 2}, + {"_torch_cpp_torch_namespace__nested_tensor_storage_offsets_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_tensor_storage_offsets_out_out_Tensor_self_Tensor, 2}, {"_torch_cpp_torch_namespace__nested_from_padded_and_nested_example_out_out_Tensor_padded_Tensor_nt_example_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_from_padded_and_nested_example_out_out_Tensor_padded_Tensor_nt_example_Tensor, 3}, - {"_torch_cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef, 5}, + {"_torch_cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor, 5}, + {"_torch_cpp_torch_namespace__nested_view_from_jagged_copy_out_out_Tensor_self_Tensor_offsets_Tensor_dummy_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_view_from_jagged_copy_out_out_Tensor_self_Tensor_offsets_Tensor_dummy_Tensor, 8}, + 
{"_torch_cpp_torch_namespace__nested_get_values_copy_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__nested_get_values_copy_out_out_Tensor_self_Tensor, 2}, {"_torch_cpp_torch_namespace__trilinear_out_out_Tensor_i1_Tensor_i2_Tensor_i3_Tensor_expand1_IntArrayRef_expand2_IntArrayRef_expand3_IntArrayRef_sumdim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__trilinear_out_out_Tensor_i1_Tensor_i2_Tensor_i3_Tensor_expand1_IntArrayRef_expand2_IntArrayRef_expand3_IntArrayRef_sumdim_IntArrayRef, 9}, {"_torch_cpp_torch_namespace__unique_out_out0_Tensor_out1_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__unique_out_out0_Tensor_out1_Tensor_self_Tensor, 5}, {"_torch_cpp_torch_namespace_unique_dim_out_out0_Tensor_out1_Tensor_out2_Tensor_self_Tensor_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_unique_dim_out_out0_Tensor_out1_Tensor_out2_Tensor_self_Tensor_dim_int64_t, 8}, @@ -48912,6 +51083,8 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace_binomial_out_out_Tensor_count_Tensor_prob_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_binomial_out_out_Tensor_count_Tensor_prob_Tensor, 4}, {"_torch_cpp_torch_namespace_native_norm_out_out_Tensor_self_Tensor_p_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_native_norm_out_out_Tensor_self_Tensor_p_Scalar, 3}, {"_torch_cpp_torch_namespace_native_norm_out_out_Tensor_self_Tensor_p_Scalar_dim_IntArrayRef_keepdim_bool_dtype_ScalarType", (DL_FUNC) &_torch_cpp_torch_namespace_native_norm_out_out_Tensor_self_Tensor_p_Scalar_dim_IntArrayRef_keepdim_bool_dtype_ScalarType, 6}, + {"_torch_cpp_torch_namespace__batch_norm_with_update_functional_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace__batch_norm_with_update_functional_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double, 7}, + {"_torch_cpp_torch_namespace__batch_norm_no_update_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double", (DL_FUNC) &_torch_cpp_torch_namespace__batch_norm_no_update_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double, 11}, {"_torch_cpp_torch_namespace__sparse_sum_out_out_Tensor_self_Tensor_dim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_sum_out_out_Tensor_self_Tensor_dim_IntArrayRef, 3}, {"_torch_cpp_torch_namespace__sparse_sum_backward_out_out_Tensor_grad_Tensor_self_Tensor_dim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_sum_backward_out_out_Tensor_grad_Tensor_self_Tensor_dim_IntArrayRef, 4}, {"_torch_cpp_torch_namespace__sparse_csr_sum_out_out_Tensor_self_Tensor_dim_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_csr_sum_out_out_Tensor_self_Tensor_dim_IntArrayRef, 5}, @@ -48936,27 +51109,28 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__sparse_addmm_out_out_Tensor_self_Tensor_mat1_Tensor_mat2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_addmm_out_out_Tensor_self_Tensor_mat1_Tensor_mat2_Tensor, 6}, {"_torch_cpp_torch_namespace_sparse_coo_tensor_out_out_Tensor_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_sparse_coo_tensor_out_out_Tensor_size_IntArrayRef, 2}, 
{"_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef, 4}, - {"_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor, 6}, + {"_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor, 7}, {"_torch_cpp_torch_namespace_sparse_resize_out_out_Tensor_self_Tensor_size_IntArrayRef_sparse_dim_int64_t_dense_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_sparse_resize_out_out_Tensor_self_Tensor_size_IntArrayRef_sparse_dim_int64_t_dense_dim_int64_t, 5}, {"_torch_cpp_torch_namespace_sparse_resize_self_Tensor_size_IntArrayRef_sparse_dim_int64_t_dense_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_sparse_resize_self_Tensor_size_IntArrayRef_sparse_dim_int64_t_dense_dim_int64_t, 4}, {"_torch_cpp_torch_namespace_sparse_resize_and_clear_out_out_Tensor_self_Tensor_size_IntArrayRef_sparse_dim_int64_t_dense_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_sparse_resize_and_clear_out_out_Tensor_self_Tensor_size_IntArrayRef_sparse_dim_int64_t_dense_dim_int64_t, 5}, {"_torch_cpp_torch_namespace_sparse_resize_and_clear_self_Tensor_size_IntArrayRef_sparse_dim_int64_t_dense_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_sparse_resize_and_clear_self_Tensor_size_IntArrayRef_sparse_dim_int64_t_dense_dim_int64_t, 4}, {"_torch_cpp_torch_namespace_sparse_mask_out_out_Tensor_self_Tensor_mask_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_sparse_mask_out_out_Tensor_self_Tensor_mask_Tensor, 3}, - {"_torch_cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor, 3}, + {"_torch_cpp_torch_namespace__sparse_mask_projection_out_out_Tensor_self_Tensor_mask_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__sparse_mask_projection_out_out_Tensor_self_Tensor_mask_Tensor, 4}, + {"_torch_cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor, 4}, {"_torch_cpp_torch_namespace__coalesce_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__coalesce_out_out_Tensor_self_Tensor, 2}, {"_torch_cpp_torch_namespace__coalesced_out_out_Tensor_self_Tensor_coalesced_bool", (DL_FUNC) &_torch_cpp_torch_namespace__coalesced_out_out_Tensor_self_Tensor_coalesced_bool, 3}, {"_torch_cpp_torch_namespace__coalesced_self_Tensor_coalesced_bool", (DL_FUNC) &_torch_cpp_torch_namespace__coalesced_self_Tensor_coalesced_bool, 2}, {"_torch_cpp_torch_namespace_copy_sparse_to_sparse_out_out_Tensor_self_Tensor_src_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_copy_sparse_to_sparse_out_out_Tensor_self_Tensor_src_Tensor, 4}, {"_torch_cpp_torch_namespace_copy_sparse_to_sparse_self_Tensor_src_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_copy_sparse_to_sparse_self_Tensor_src_Tensor, 3}, - 
{"_torch_cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t, 3}, - {"_torch_cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor, 5}, - {"_torch_cpp_torch_namespace_to_sparse_csr_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_to_sparse_csr_out_out_Tensor_self_Tensor, 3}, - {"_torch_cpp_torch_namespace_to_sparse_csc_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_to_sparse_csc_out_out_Tensor_self_Tensor, 3}, - {"_torch_cpp_torch_namespace_to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef, 4}, - {"_torch_cpp_torch_namespace_to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace_to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef, 4}, + {"_torch_cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t, 3}, + {"_torch_cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor, 5}, + {"_torch_cpp_torch_namespace__to_sparse_csr_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__to_sparse_csr_out_out_Tensor_self_Tensor, 3}, + {"_torch_cpp_torch_namespace__to_sparse_csc_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__to_sparse_csc_out_out_Tensor_self_Tensor, 3}, + {"_torch_cpp_torch_namespace__to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef, 4}, + {"_torch_cpp_torch_namespace__to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef, 4}, {"_torch_cpp_torch_namespace_to_mkldnn_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_to_mkldnn_out_out_Tensor_self_Tensor, 3}, {"_torch_cpp_torch_namespace_mkldnn_reorder_conv2d_weight_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_mkldnn_reorder_conv2d_weight_out_out_Tensor_self_Tensor, 7}, - {"_torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor, 6}, + {"_torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor, 7}, {"_torch_cpp_torch_namespace_quantize_per_tensor_dynamic_out_out_Tensor_self_Tensor_dtype_ScalarType_reduce_range_bool", (DL_FUNC) &_torch_cpp_torch_namespace_quantize_per_tensor_dynamic_out_out_Tensor_self_Tensor_dtype_ScalarType_reduce_range_bool, 4}, {"_torch_cpp_torch_namespace_quantize_per_tensor_out_out_Tensor_self_Tensor_scale_double_zero_point_int64_t_dtype_ScalarType", (DL_FUNC) &_torch_cpp_torch_namespace_quantize_per_tensor_out_out_Tensor_self_Tensor_scale_double_zero_point_int64_t_dtype_ScalarType, 5}, {"_torch_cpp_torch_namespace_quantize_per_tensor_out_out_Tensor_self_Tensor_scale_Tensor_zero_point_Tensor_dtype_ScalarType", (DL_FUNC) 
&_torch_cpp_torch_namespace_quantize_per_tensor_out_out_Tensor_self_Tensor_scale_Tensor_zero_point_Tensor_dtype_ScalarType, 5}, @@ -49036,7 +51210,6 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__histogramdd_from_bin_cts_out_out_Tensor_self_Tensor_bins_IntArrayRef", (DL_FUNC) &_torch_cpp_torch_namespace__histogramdd_from_bin_cts_out_out_Tensor_self_Tensor_bins_IntArrayRef, 6}, {"_torch_cpp_torch_namespace__histogramdd_from_bin_tensors_out_out_Tensor_self_Tensor_bins_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__histogramdd_from_bin_tensors_out_out_Tensor_self_Tensor_bins_TensorList, 5}, {"_torch_cpp_torch_namespace_remainder_out_out_Tensor_self_Scalar_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_remainder_out_out_Tensor_self_Scalar_other_Tensor, 3}, - {"_torch_cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool", (DL_FUNC) &_torch_cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool, 5}, {"_torch_cpp_torch_namespace_unfold_backward_out_out_Tensor_grad_in_Tensor_input_sizes_IntArrayRef_dim_int64_t_size_int64_t_step_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_unfold_backward_out_out_Tensor_grad_in_Tensor_input_sizes_IntArrayRef_dim_int64_t_size_int64_t_step_int64_t, 6}, {"_torch_cpp_torch_namespace_normal_out_out_Tensor_self_Tensor_mean_double_std_double", (DL_FUNC) &_torch_cpp_torch_namespace_normal_out_out_Tensor_self_Tensor_mean_double_std_double, 5}, {"_torch_cpp_torch_namespace__amp_foreach_non_finite_check_and_unscale_out_out_TensorList_self_TensorList_found_inf_Tensor_inv_scale_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__amp_foreach_non_finite_check_and_unscale_out_out_TensorList_self_TensorList_found_inf_Tensor_inv_scale_Tensor, 4}, @@ -49044,33 +51217,38 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__amp_update_scale_out_out_Tensor_self_Tensor_growth_tracker_Tensor_found_inf_Tensor_scale_growth_factor_double_scale_backoff_factor_double_growth_interval_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__amp_update_scale_out_out_Tensor_self_Tensor_growth_tracker_Tensor_found_inf_Tensor_scale_growth_factor_double_scale_backoff_factor_double_growth_interval_int64_t, 7}, {"_torch_cpp_torch_namespace__amp_update_scale_self_Tensor_growth_tracker_Tensor_found_inf_Tensor_scale_growth_factor_double_scale_backoff_factor_double_growth_interval_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace__amp_update_scale_self_Tensor_growth_tracker_Tensor_found_inf_Tensor_scale_growth_factor_double_scale_backoff_factor_double_growth_interval_int64_t, 6}, {"_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, - {"_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, - {"_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, - {"_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, - {"_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) 
&_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, - {"_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, - {"_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, - {"_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, {"_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList, 4}, + {"_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar, 3}, + {"_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_Tensor, 4}, + {"_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, {"_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList, 4}, + {"_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar, 3}, + {"_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, {"_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList, 3}, + {"_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar, 3}, + {"_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_Tensor, 3}, + {"_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, {"_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_TensorList, 3}, - {"_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList, 3}, - {"_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList", (DL_FUNC) 
&_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList, 3}, - {"_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList, 3}, - {"_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList, 3}, - {"_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar, 3}, - {"_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar, 3}, {"_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar, 3}, - {"_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar, 3}, - {"_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar, 3}, + {"_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_Tensor, 3}, + {"_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, + {"_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList, 3}, {"_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar, 3}, + {"_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, + {"_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList, 3}, + {"_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar, 3}, + {"_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, + {"_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList", (DL_FUNC) 
&_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList, 3}, {"_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar, 3}, + {"_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar, 3}, + {"_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList, 3}, {"_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar, 3}, - {"_torch_cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_zero_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_zero_self_TensorList, 1}, - {"_torch_cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList, 5}, + {"_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar, 5}, + {"_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor, 5}, + {"_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList, 5}, + {"_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar, 5}, + {"_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor, 5}, {"_torch_cpp_torch_namespace__foreach_abs_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_abs_out_out_TensorList_self_TensorList, 2}, 
{"_torch_cpp_torch_namespace__foreach_acos_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_acos_out_out_TensorList_self_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_asin_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_asin_out_out_TensorList_self_TensorList, 2}, @@ -49080,34 +51258,37 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__foreach_cosh_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_cosh_out_out_TensorList_self_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_erf_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_erf_out_out_TensorList_self_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_erfc_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_erfc_out_out_TensorList_self_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_expm1_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_expm1_out_out_TensorList_self_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_floor_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_floor_out_out_TensorList_self_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList, 4}, + {"_torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar, 4}, + {"_torch_cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_log_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_log_out_out_TensorList_self_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_log10_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_log10_out_out_TensorList_self_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_log1p_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_log1p_out_out_TensorList_self_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_log2_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_log2_out_out_TensorList_self_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_max_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_max_out_out_TensorList_self_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList, 2}, - 
{"_torch_cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList, 4}, + {"_torch_cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_TensorList, 3}, + {"_torch_cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_Scalar, 3}, + {"_torch_cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_ArrayRefScalar, 3}, {"_torch_cpp_torch_namespace__foreach_reciprocal_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_reciprocal_out_out_TensorList_self_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_sigmoid_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sigmoid_out_out_TensorList_self_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_sign_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sign_out_out_TensorList_self_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList, 2}, {"_torch_cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList", (DL_FUNC) 
&_torch_cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList, 2}, - {"_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList, 5}, - {"_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList, 5}, - {"_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar, 5}, - {"_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor, 5}, - {"_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar, 5}, - {"_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor, 5}, - {"_torch_cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList, 3}, - {"_torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList, 4}, - {"_torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar, 4}, + {"_torch_cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList, 2}, + {"_torch_cpp_torch_namespace__foreach_zero_self_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_zero_self_TensorList, 1}, + {"_torch_cpp_torch_namespace__foreach_copy_out_out_TensorList_self_TensorList_src_TensorList", (DL_FUNC) &_torch_cpp_torch_namespace__foreach_copy_out_out_TensorList_self_TensorList_src_TensorList, 4}, {"_torch_cpp_torch_namespace_bucketize_out_out_Tensor_self_Scalar_boundaries_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_bucketize_out_out_Tensor_self_Scalar_boundaries_Tensor, 5}, - {"_torch_cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar", (DL_FUNC) &_torch_cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar, 7}, {"_torch_cpp_torch_namespace_glu_jvp_out_out_Tensor_glu_Tensor_x_Tensor_dx_Tensor_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_glu_jvp_out_out_Tensor_glu_Tensor_x_Tensor_dx_Tensor_dim_int64_t, 5}, 
{"_torch_cpp_torch_namespace_glu_backward_jvp_out_out_Tensor_grad_x_Tensor_grad_glu_Tensor_x_Tensor_dgrad_glu_Tensor_dx_Tensor_dim_int64_t", (DL_FUNC) &_torch_cpp_torch_namespace_glu_backward_jvp_out_out_Tensor_grad_x_Tensor_grad_glu_Tensor_x_Tensor_dgrad_glu_Tensor_dx_Tensor_dim_int64_t, 7}, {"_torch_cpp_torch_namespace_hardswish_backward_out_out_Tensor_grad_output_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace_hardswish_backward_out_out_Tensor_grad_output_Tensor_self_Tensor, 3}, @@ -49170,13 +51351,21 @@ static const R_CallMethodDef CallEntries[] = { {"_torch_cpp_torch_namespace__native_multi_head_attention_out_out0_Tensor_out1_Tensor_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__native_multi_head_attention_out_out0_Tensor_out1_Tensor_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor, 15}, {"_torch_cpp_torch_namespace__triton_scaled_dot_attention_out_out_Tensor_q_Tensor_k_Tensor_v_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__triton_scaled_dot_attention_out_out_Tensor_q_Tensor_k_Tensor_v_Tensor, 5}, {"_torch_cpp_torch_namespace__triton_multi_head_attention_out_out_Tensor_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__triton_multi_head_attention_out_out_Tensor_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor, 11}, - {"_torch_cpp_torch_namespace__transformer_decoder_only_layer_fwd_out_out0_Tensor_out1_Tensor_out2_Tensor_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__transformer_decoder_only_layer_fwd_out_out0_Tensor_out1_Tensor_out2_Tensor_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor, 24}, - {"_torch_cpp_torch_namespace__native_decoder_only_multi_head_attention_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__native_decoder_only_multi_head_attention_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor, 18}, {"_torch_cpp_torch_namespace__foobar_out_out_Tensor_self_Tensor", (DL_FUNC) &_torch_cpp_torch_namespace__foobar_out_out_Tensor_self_Tensor, 5}, {"_torch_cpp_torch_namespace__fused_adam_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool", (DL_FUNC) 
&_torch_cpp_torch_namespace__fused_adam_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool, 16}, {"_torch_cpp_torch_namespace__fused_adam_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_adam_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool, 15}, + {"_torch_cpp_torch_namespace__fused_adam_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_adam_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool, 16}, + {"_torch_cpp_torch_namespace__fused_adam_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_adam_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool, 15}, {"_torch_cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool, 16}, {"_torch_cpp_torch_namespace__fused_adamw_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_adamw_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool, 15}, + {"_torch_cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool", (DL_FUNC) 
&_torch_cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool, 16}, + {"_torch_cpp_torch_namespace__fused_adamw_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_adamw_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool, 15}, + {"_torch_cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool, 13}, + {"_torch_cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool, 12}, + {"_torch_cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool, 13}, + {"_torch_cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool, 12}, + {"_torch_cpp_torch_namespace__fused_adagrad_out_out_TensorList_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_adagrad_out_out_TensorList_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool, 12}, + {"_torch_cpp_torch_namespace__fused_adagrad_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool", (DL_FUNC) &_torch_cpp_torch_namespace__fused_adagrad_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool, 11}, 
{"_torch_cpp_torch_generator", (DL_FUNC) &_torch_cpp_torch_generator, 0}, {"_torch_cpp_generator_current_seed", (DL_FUNC) &_torch_cpp_generator_current_seed, 1}, {"_torch_cpp_generator_set_current_seed", (DL_FUNC) &_torch_cpp_generator_set_current_seed, 2}, diff --git a/src/autograd.cpp b/src/autograd.cpp index 994a692f89..2cf760a304 100644 --- a/src/autograd.cpp +++ b/src/autograd.cpp @@ -1,3 +1,4 @@ +#include #include #include @@ -57,15 +58,19 @@ void call_r_gc(bool full); namespace { -EventLoop gTasks; -EventLoop gBackwardTasks; -std::atomic backward_is_running(false); -static ThreadPool* pool; +static EventLoop gTasks; +static EventLoop gBackwardTasks; +static std::atomic backward_is_running(false); void schedule_backward_task(std::packaged_task&& task) { - if (!pool) { - pool = new ThreadPool(5); - } + // This needs to be a pointer because on Windows, disposing of this objects + // when the process exits causes a crash. + static auto pool = std::unique_ptr, std::function*)>>( + new ThreadPool(5), + [](auto p) { + p->stop(); + } + ); if (std::this_thread::get_id() == main_thread_id()) { pool->push(std::move(task)); @@ -85,7 +90,6 @@ void cpp_torch_method__backward_self_Tensor_inputs_TensorList( auto running_sg = makeScopeGuard([] {backward_is_running = false;}); std::function backward([&]() { auto sg = makeScopeGuard([] { gTasks.stopWhenEmpty(); }); - lantern_Tensor__backward_tensor_tensorlist_tensor_bool_bool( self.get(), inputs.get(), gradient.get(), retain_graph.get(), create_graph.get()); diff --git a/src/codegen.cpp b/src/codegen.cpp index aa459e49c0..436e307997 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -15,6 +15,7 @@ inline bool is_in(const std::string& x, const std::set& y) { std::string cpp_arg_to_torch_type(SEXP obj, const std::vector& expected_types, const std::string& arg_name) { + if (Rf_isSymbol(obj)) { return "Missing"; } @@ -98,18 +99,22 @@ std::string cpp_arg_to_torch_type(SEXP obj, bool is_numeric_or_list_or_null = is_numeric || is_list || is_null; - if (is_in("int64_t", etypes) && ((is_numeric && len == 1) || is_null)) { + if (is_in("int64_t", etypes) && ((is_numeric && len == 1))) { return "int64_t"; } - if (is_in("c10::SymInt", etypes) && ((is_numeric && len == 1) || is_null)) { - return "c10::SymInt"; - } - if (is_in("IntArrayRef", etypes) && is_numeric_or_list_or_null) { return "IntArrayRef"; } + if (is_in("int64_t", etypes) && is_null) { + return "int64_t"; + } + + if (is_in("c10::SymInt", etypes) && ((is_numeric && len == 1) || is_null)) { + return "c10::SymInt"; + } + if (is_in("ArrayRef", etypes) && (is_numeric || is_null)) { return "ArrayRef"; } @@ -118,6 +123,11 @@ std::string cpp_arg_to_torch_type(SEXP obj, return "double"; } + if (is_in("TensorList", etypes) && + (is_list || is_tensor || is_null || is_numeric)) { + return "TensorList"; + } + bool is_logical = Rf_isLogical(obj); if (is_in("bool", etypes) && is_logical && len == 1) { return "bool"; @@ -130,11 +140,6 @@ std::string cpp_arg_to_torch_type(SEXP obj, return "std::array"; } - if (is_in("TensorList", etypes) && - (is_list || is_tensor || is_null || is_numeric)) { - return "TensorList"; - } - if (is_in("Generator", etypes) && is_null) { return "Generator"; } @@ -152,6 +157,10 @@ std::string cpp_arg_to_torch_type(SEXP obj, return "const c10::List> &"; } + if (is_in("const c10::List<::std::optional> &", etypes) && is_list) { + return "const c10::List<::std::optional> &"; + } + Rcpp::stop("Can't convert argument:" + arg_name); } @@ -205,6 +214,11 @@ std::string create_fn_name(const 
std::string& fun_name, std::string type; for (auto x : nd_args) { type = cpp_arg_to_torch_type(args[x], expected_types[x], x); + //std::cout << "arg_name: " << x << ": " << type << std::endl; + for (auto y : Rcpp::as>(expected_types[x])) { + //std::cout << "expected_types: " << y << std::endl; + } + if (type != "Missing") { arg_names.push_back(x); arg_types.push_back(type); diff --git a/src/gen-namespace.cpp b/src/gen-namespace.cpp index 289046a831..58844c40f6 100644 --- a/src/gen-namespace.cpp +++ b/src/gen-namespace.cpp @@ -275,6 +275,12 @@ XPtrTorchTensor cpp_torch_method_all_self_Tensor_dim_int64_t (XPtrTorchTensor se return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_method_all_self_Tensor_dim_IntArrayRef (XPtrTorchTensor self, XPtrTorchOptionalIndexIntArrayRef dim, XPtrTorchbool keepdim) { + auto r_out = lantern_Tensor_all_tensor_intarrayref_bool(self.get(), dim.get(), keepdim.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_method_all_self_Tensor_dim_Dimname (XPtrTorchTensor self, XPtrTorchDimname dim, XPtrTorchbool keepdim) { auto r_out = lantern_Tensor_all_tensor_dimname_bool(self.get(), dim.get(), keepdim.get()); @@ -293,6 +299,12 @@ XPtrTorchTensor cpp_torch_method_any_self_Tensor_dim_int64_t (XPtrTorchTensor se return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_method_any_self_Tensor_dim_IntArrayRef (XPtrTorchTensor self, XPtrTorchOptionalIndexIntArrayRef dim, XPtrTorchbool keepdim) { + auto r_out = lantern_Tensor_any_tensor_intarrayref_bool(self.get(), dim.get(), keepdim.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_method_any_self_Tensor_dim_Dimname (XPtrTorchTensor self, XPtrTorchDimname dim, XPtrTorchbool keepdim) { auto r_out = lantern_Tensor_any_tensor_dimname_bool(self.get(), dim.get(), keepdim.get()); @@ -521,6 +533,12 @@ XPtrTorchTensor cpp_torch_method_copysign__self_Tensor_other_Scalar (XPtrTorchTe return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_method__lazy_clone_self_Tensor (XPtrTorchTensor self) { + auto r_out = lantern_Tensor__lazy_clone_tensor(self.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_method_logical_not_self_Tensor (XPtrTorchTensor self) { auto r_out = lantern_Tensor_logical_not_tensor(self.get()); @@ -1252,8 +1270,8 @@ return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_method_index_self_Tensor_indices_constc10Listc10optionalTensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices) { - auto r_out = lantern_Tensor_index_tensor_constclistcoptionaltensor(self.get(), indices.get()); +XPtrTorchTensor cpp_torch_method_index_self_Tensor_indices_constc10ListstdoptionalTensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices) { + auto r_out = lantern_Tensor_index_tensor_constcliststdoptionaltensor(self.get(), indices.get()); return XPtrTorchTensor(r_out); } @@ -1282,14 +1300,14 @@ return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_method_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate) { - auto r_out = lantern_Tensor_index_put__tensor_constclistcoptionaltensor_tensor_bool(self.get(), indices.get(), values.get(), accumulate.get()); 
+XPtrTorchTensor cpp_torch_method_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate) { + auto r_out = lantern_Tensor_index_put__tensor_constcliststdoptionaltensor_tensor_bool(self.get(), indices.get(), values.get(), accumulate.get()); return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_method_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate) { - auto r_out = lantern_Tensor_index_put_tensor_constclistcoptionaltensor_tensor_bool(self.get(), indices.get(), values.get(), accumulate.get()); +XPtrTorchTensor cpp_torch_method_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate) { + auto r_out = lantern_Tensor_index_put_tensor_constcliststdoptionaltensor_tensor_bool(self.get(), indices.get(), values.get(), accumulate.get()); return XPtrTorchTensor(r_out); } @@ -2110,6 +2128,12 @@ XPtrTorchTensor cpp_torch_method_slice_self_Tensor (XPtrTorchTensor self, XPtrTo return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_method_slice_inverse_self_Tensor_src_Tensor (XPtrTorchTensor self, XPtrTorchTensor src, XPtrTorchindex_int64_t dim, XPtrTorchoptional_int64_t start, XPtrTorchoptional_int64_t end, XPtrTorchint64_t step) { + auto r_out = lantern_Tensor_slice_inverse_tensor_tensor_intt_intt_intt_intt(self.get(), src.get(), dim.get(), start.get(), end.get(), step.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_method_slice_scatter_self_Tensor_src_Tensor (XPtrTorchTensor self, XPtrTorchTensor src, XPtrTorchindex_int64_t dim, XPtrTorchoptional_int64_t start, XPtrTorchoptional_int64_t end, XPtrTorchint64_t step) { auto r_out = lantern_Tensor_slice_scatter_tensor_tensor_intt_intt_intt_intt(self.get(), src.get(), dim.get(), start.get(), end.get(), step.get()); @@ -2489,9 +2513,9 @@ return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchIntArrayRef cpp_torch_method__nested_tensor_offsets_self_Tensor (XPtrTorchTensor self) { - auto r_out = lantern_Tensor__nested_tensor_offsets_tensor(self.get()); -return XPtrTorchIntArrayRef(r_out); +XPtrTorchTensor cpp_torch_method__nested_tensor_storage_offsets_self_Tensor (XPtrTorchTensor self) { + auto r_out = lantern_Tensor__nested_tensor_storage_offsets_tensor(self.get()); +return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] @@ -2742,14 +2766,20 @@ return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_method_to_dense_self_Tensor (XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype) { - auto r_out = lantern_Tensor_to_dense_tensor_scalartype(self.get(), dtype.get()); +XPtrTorchTensor cpp_torch_method__sparse_mask_projection_self_Tensor_mask_Tensor (XPtrTorchTensor self, XPtrTorchTensor mask, XPtrTorchbool accumulate_matches) { + auto r_out = lantern_Tensor__sparse_mask_projection_tensor_tensor_bool(self.get(), mask.get(), accumulate_matches.get()); +return XPtrTorchTensor(r_out); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_method_to_dense_self_Tensor (XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype, XPtrTorchoptional_bool masked_grad) { + 
auto r_out = lantern_Tensor_to_dense_tensor_scalartype_bool(self.get(), dtype.get(), masked_grad.get()); return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_method__to_dense_self_Tensor (XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype) { - auto r_out = lantern_Tensor__to_dense_tensor_scalartype(self.get(), dtype.get()); +XPtrTorchTensor cpp_torch_method__to_dense_self_Tensor (XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype, XPtrTorchoptional_bool masked_grad) { + auto r_out = lantern_Tensor__to_dense_tensor_scalartype_bool(self.get(), dtype.get(), masked_grad.get()); return XPtrTorchTensor(r_out); } @@ -2867,36 +2897,72 @@ XPtrTorchTensor cpp_torch_method_to_sparse_self_Tensor_sparse_dim_int64_t (XPtrT return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_method__to_sparse_self_Tensor_sparse_dim_int64_t (XPtrTorchTensor self, XPtrTorchint64_t sparse_dim) { + auto r_out = lantern_Tensor__to_sparse_tensor_intt(self.get(), sparse_dim.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_method_to_sparse_self_Tensor (XPtrTorchTensor self, XPtrTorchLayout layout, XPtrTorchOptionalIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim) { auto r_out = lantern_Tensor_to_sparse_tensor_layout_intarrayref_intt(self.get(), layout.get(), blocksize.get(), dense_dim.get()); return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_method__to_sparse_self_Tensor (XPtrTorchTensor self, XPtrTorchLayout layout, XPtrTorchOptionalIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim) { + auto r_out = lantern_Tensor__to_sparse_tensor_layout_intarrayref_intt(self.get(), layout.get(), blocksize.get(), dense_dim.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_method_to_sparse_csr_self_Tensor (XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim) { auto r_out = lantern_Tensor_to_sparse_csr_tensor_intt(self.get(), dense_dim.get()); return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_method__to_sparse_csr_self_Tensor (XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim) { + auto r_out = lantern_Tensor__to_sparse_csr_tensor_intt(self.get(), dense_dim.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_method_to_sparse_csc_self_Tensor (XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim) { auto r_out = lantern_Tensor_to_sparse_csc_tensor_intt(self.get(), dense_dim.get()); return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_method__to_sparse_csc_self_Tensor (XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim) { + auto r_out = lantern_Tensor__to_sparse_csc_tensor_intt(self.get(), dense_dim.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_method_to_sparse_bsr_self_Tensor_blocksize_IntArrayRef (XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim) { auto r_out = lantern_Tensor_to_sparse_bsr_tensor_intarrayref_intt(self.get(), blocksize.get(), dense_dim.get()); return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_method__to_sparse_bsr_self_Tensor_blocksize_IntArrayRef (XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim) { + auto r_out = 
lantern_Tensor__to_sparse_bsr_tensor_intarrayref_intt(self.get(), blocksize.get(), dense_dim.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_method_to_sparse_bsc_self_Tensor_blocksize_IntArrayRef (XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim) { auto r_out = lantern_Tensor_to_sparse_bsc_tensor_intarrayref_intt(self.get(), blocksize.get(), dense_dim.get()); return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_method__to_sparse_bsc_self_Tensor_blocksize_IntArrayRef (XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim) { + auto r_out = lantern_Tensor__to_sparse_bsc_tensor_intarrayref_intt(self.get(), blocksize.get(), dense_dim.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_method_to_mkldnn_self_Tensor (XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype) { auto r_out = lantern_Tensor_to_mkldnn_tensor_scalartype(self.get(), dtype.get()); @@ -3917,6 +3983,12 @@ XPtrTorchTensor cpp_torch_method_nonzero_self_Tensor (XPtrTorchTensor self) { return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_method_nonzero_static_self_Tensor_size_int64_t (XPtrTorchTensor self, XPtrTorchint64_t size, XPtrTorchint64_t fill_value) { + auto r_out = lantern_Tensor_nonzero_static_tensor_intt_intt(self.get(), size.get(), fill_value.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensorList cpp_torch_method_nonzero_numpy_self_Tensor (XPtrTorchTensor self) { auto r_out = lantern_Tensor_nonzero_numpy_tensor(self.get()); @@ -4672,11 +4744,60 @@ void cpp_torch_namespace__assert_async_self_Tensor (XPtrTorchTensor self) { lantern__assert_async_tensor(self.get()); } +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__assert_async_self_Tensor_assert_msg_c10string_view (XPtrTorchTensor self, XPtrTorchstring_view assert_msg) { + lantern__assert_async_tensor_cstringview(self.get(), assert_msg.get()); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__assert_scalar_self_Scalar_assert_msg_c10string_view (XPtrTorchScalar self, XPtrTorchstring_view assert_msg) { + lantern__assert_scalar_scalar_cstringview(self.get(), assert_msg.get()); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__functional_assert_scalar_self_Scalar_assert_msg_c10string_view_dep_token_Tensor (XPtrTorchScalar self, XPtrTorchstring_view assert_msg, XPtrTorchTensor dep_token) { + auto r_out = lantern__functional_assert_scalar_scalar_cstringview_tensor(self.get(), assert_msg.get(), dep_token.get()); +return XPtrTorchTensor(r_out); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__functional_assert_async_self_Tensor_assert_msg_c10string_view_dep_token_Tensor (XPtrTorchTensor self, XPtrTorchstring_view assert_msg, XPtrTorchTensor dep_token) { + auto r_out = lantern__functional_assert_async_tensor_cstringview_tensor(self.get(), assert_msg.get(), dep_token.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] void cpp_torch_namespace__assert_tensor_metadata_a_Tensor (XPtrTorchTensor a, XPtrTorchOptionalIntArrayRef size, XPtrTorchOptionalIntArrayRef stride, XPtrTorchoptional_scalar_type dtype) { lantern__assert_tensor_metadata_tensor_intarrayref_intarrayref_scalartype(a.get(), size.get(), stride.get(), dtype.get()); } +// [[Rcpp::export(rng=false)]] +void 
cpp_torch_namespace__print_s_c10string_view (XPtrTorchstring_view s) { + lantern__print_cstringview(s.get()); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace_sym_constrain_range_size_Scalar (XPtrTorchScalar size, XPtrTorchoptional_int64_t min, XPtrTorchoptional_int64_t max) { + lantern_sym_constrain_range_scalar_intt_intt(size.get(), min.get(), max.get()); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace_sym_constrain_range_for_size_size_Scalar (XPtrTorchScalar size, XPtrTorchoptional_int64_t min, XPtrTorchoptional_int64_t max) { + lantern_sym_constrain_range_for_size_scalar_intt_intt(size.get(), min.get(), max.get()); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__functional_sym_constrain_range_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor (XPtrTorchScalar size, XPtrTorchoptional_int64_t min, XPtrTorchoptional_int64_t max, XPtrTorchTensor dep_token) { + auto r_out = lantern__functional_sym_constrain_range_scalar_intt_intt_tensor(size.get(), min.get(), max.get(), dep_token.get()); +return XPtrTorchTensor(r_out); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__functional_sym_constrain_range_for_size_size_Scalar_min_int64_t_max_int64_t_dep_token_Tensor (XPtrTorchScalar size, XPtrTorchoptional_int64_t min, XPtrTorchoptional_int64_t max, XPtrTorchTensor dep_token) { + auto r_out = lantern__functional_sym_constrain_range_for_size_scalar_intt_intt_tensor(size.get(), min.get(), max.get(), dep_token.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchbool cpp_torch_namespace__use_cudnn_ctc_loss_log_probs_Tensor_targets_Tensor_input_lengths_IntArrayRef_target_lengths_IntArrayRef_blank_int64_t (XPtrTorchTensor log_probs, XPtrTorchTensor targets, XPtrTorchIntArrayRef input_lengths, XPtrTorchIntArrayRef target_lengths, XPtrTorchint64_t blank) { auto r_out = lantern__use_cudnn_ctc_loss_tensor_tensor_intarrayref_intarrayref_intt(log_probs.get(), targets.get(), input_lengths.get(), target_lengths.get(), blank.get()); @@ -5141,18 +5262,36 @@ XPtrTorchTensor cpp_torch_namespace__test_check_tensor_self_Tensor (XPtrTorchTen return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__test_functorch_fallback_self_Tensor_other_Tensor (XPtrTorchTensor self, XPtrTorchTensor other) { + auto r_out = lantern__test_functorch_fallback_tensor_tensor(self.get(), other.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace_all_self_Tensor_dim_int64_t (XPtrTorchTensor self, XPtrTorchindex_int64_t dim, XPtrTorchbool keepdim) { auto r_out = lantern_all_tensor_intt_bool(self.get(), dim.get(), keepdim.get()); return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace_all_self_Tensor_dim_IntArrayRef (XPtrTorchTensor self, XPtrTorchOptionalIndexIntArrayRef dim, XPtrTorchbool keepdim) { + auto r_out = lantern_all_tensor_intarrayref_bool(self.get(), dim.get(), keepdim.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_int64_t (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchindex_int64_t dim, XPtrTorchbool keepdim) { auto r_out = lantern_all_out_tensor_tensor_intt_bool(out.get(), self.get(), dim.get(), keepdim.get()); return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace_all_out_out_Tensor_self_Tensor_dim_IntArrayRef 
(XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexIntArrayRef dim, XPtrTorchbool keepdim) {
+  auto r_out = lantern_all_out_tensor_tensor_intarrayref_bool(out.get(), self.get(), dim.get(), keepdim.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_all_self_Tensor_dim_Dimname (XPtrTorchTensor self, XPtrTorchDimname dim, XPtrTorchbool keepdim) {
   auto r_out = lantern_all_tensor_dimname_bool(self.get(), dim.get(), keepdim.get());
@@ -5177,12 +5316,24 @@ XPtrTorchTensor cpp_torch_namespace_any_self_Tensor_dim_int64_t (XPtrTorchTensor
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_any_self_Tensor_dim_IntArrayRef (XPtrTorchTensor self, XPtrTorchOptionalIndexIntArrayRef dim, XPtrTorchbool keepdim) {
+  auto r_out = lantern_any_tensor_intarrayref_bool(self.get(), dim.get(), keepdim.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_int64_t (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchindex_int64_t dim, XPtrTorchbool keepdim) {
   auto r_out = lantern_any_out_tensor_tensor_intt_bool(out.get(), self.get(), dim.get(), keepdim.get());
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_any_out_out_Tensor_self_Tensor_dim_IntArrayRef (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexIntArrayRef dim, XPtrTorchbool keepdim) {
+  auto r_out = lantern_any_out_tensor_tensor_intarrayref_bool(out.get(), self.get(), dim.get(), keepdim.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_any_self_Tensor_dim_Dimname (XPtrTorchTensor self, XPtrTorchDimname dim, XPtrTorchbool keepdim) {
   auto r_out = lantern_any_tensor_dimname_bool(self.get(), dim.get(), keepdim.get());
@@ -5623,6 +5774,12 @@ XPtrTorchTensor cpp_torch_namespace_copysign_out_out_Tensor_self_Tensor_other_Sc
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__lazy_clone_self_Tensor (XPtrTorchTensor self) {
+  auto r_out = lantern__lazy_clone_tensor(self.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_logical_not_self_Tensor (XPtrTorchTensor self) {
   auto r_out = lantern_logical_not_tensor(self.get());
@@ -6265,6 +6422,12 @@ XPtrTorchTensor cpp_torch_namespace_cudnn_convolution_self_Tensor_weight_Tensor_
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor weight, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups, XPtrTorchbool benchmark, XPtrTorchbool deterministic, XPtrTorchbool allow_tf32) {
+  auto r_out = lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(out.get(), self.get(), weight.get(), padding.get(), stride.get(), dilation.get(), groups.get(), benchmark.get(), deterministic.get(), allow_tf32.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_cudnn_convolution_transpose_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool (XPtrTorchTensor self, XPtrTorchTensor weight, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef output_padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups, XPtrTorchbool benchmark, XPtrTorchbool deterministic, XPtrTorchbool allow_tf32) {
   auto r_out = lantern_cudnn_convolution_transpose_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(self.get(), weight.get(), padding.get(), output_padding.get(), stride.get(), dilation.get(), groups.get(), benchmark.get(), deterministic.get(), allow_tf32.get());
@@ -6808,6 +6971,12 @@ XPtrTorchTensor cpp_torch_namespace_empty_size_IntArrayRef (XPtrTorchIntArrayRef
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_empty_permuted_size_IntArrayRef_physical_layout_IntArrayRef (XPtrTorchIntArrayRef size, XPtrTorchIntArrayRef physical_layout, XPtrTorchTensorOptions options) {
+  auto r_out = lantern_empty_permuted_intarrayref_intarrayref_tensoroptions(size.get(), physical_layout.get(), options.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace__empty_affine_quantized_size_IntArrayRef (XPtrTorchIntArrayRef size, XPtrTorchTensorOptions options, XPtrTorchdouble scale, XPtrTorchint64_t zero_point, XPtrTorchoptional_memory_format memory_format) {
   auto r_out = lantern__empty_affine_quantized_intarrayref_tensoroptions_double_intt_memoryformat(size.get(), options.get(), scale.get(), zero_point.get(), memory_format.get());
@@ -7311,36 +7480,32 @@ void cpp_torch_namespace__validate_compressed_sparse_indices_is_crow_bool_compre
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchint64_t cpp_torch_namespace__cufft_get_plan_cache_size_device_index_int64_t (XPtrTorchint64_t device_index) {
-  auto r_out = lantern__cufft_get_plan_cache_size_intt(device_index.get());
-return XPtrTorchint64_t(r_out);
-}
-
-// [[Rcpp::export(rng=false)]]
-XPtrTorchint64_t cpp_torch_namespace__cufft_get_plan_cache_max_size_device_index_int64_t (XPtrTorchint64_t device_index) {
-  auto r_out = lantern__cufft_get_plan_cache_max_size_intt(device_index.get());
-return XPtrTorchint64_t(r_out);
+XPtrTorchTensor cpp_torch_namespace_index_self_Tensor_indices_constc10ListstdoptionalTensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices) {
+  auto r_out = lantern_index_tensor_constcliststdoptionaltensor(self.get(), indices.get());
+return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__cufft_set_plan_cache_max_size_device_index_int64_t_max_size_int64_t (XPtrTorchint64_t device_index, XPtrTorchint64_t max_size) {
-  lantern__cufft_set_plan_cache_max_size_intt_intt(device_index.get(), max_size.get());
+XPtrTorchTensor cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices) {
+  auto r_out = lantern_index_out_tensor_tensor_constcliststdoptionaltensor(out.get(), self.get(), indices.get());
+return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__cufft_clear_plan_cache_device_index_int64_t (XPtrTorchint64_t device_index) {
-  lantern__cufft_clear_plan_cache_intt(device_index.get());
+XPtrTorchTensor cpp_torch_namespace__unsafe_index_self_Tensor_indices_constc10ListstdoptionalTensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices) {
+  auto r_out = lantern__unsafe_index_tensor_constcliststdoptionaltensor(self.get(), indices.get());
+return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace_index_self_Tensor_indices_constc10Listc10optionalTensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices) {
-  auto r_out = lantern_index_tensor_constclistcoptionaltensor(self.get(), indices.get());
+XPtrTorchTensor cpp_torch_namespace__unsafe_masked_index_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_fill_Scalar (XPtrTorchTensor self, XPtrTorchTensor mask, XPtrTorchOptionalIndexTensorList indices, XPtrTorchScalar fill) {
+  auto r_out = lantern__unsafe_masked_index_tensor_tensor_constcliststdoptionaltensor_scalar(self.get(), mask.get(), indices.get(), fill.get());
 return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace_index_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices) {
-  auto r_out = lantern_index_out_tensor_tensor_constclistcoptionaltensor(out.get(), self.get(), indices.get());
+XPtrTorchTensor cpp_torch_namespace__unsafe_masked_index_put_accumulate_self_Tensor_mask_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor (XPtrTorchTensor self, XPtrTorchTensor mask, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values) {
+  auto r_out = lantern__unsafe_masked_index_put_accumulate_tensor_tensor_constcliststdoptionaltensor_tensor(self.get(), mask.get(), indices.get(), values.get());
 return XPtrTorchTensor(r_out);
 }
 
@@ -7363,20 +7528,26 @@ return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace_index_put__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate) {
-  auto r_out = lantern_index_put__tensor_constclistcoptionaltensor_tensor_bool(self.get(), indices.get(), values.get(), accumulate.get());
+XPtrTorchTensor cpp_torch_namespace_index_put__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate) {
+  auto r_out = lantern_index_put__tensor_constcliststdoptionaltensor_tensor_bool(self.get(), indices.get(), values.get(), accumulate.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate) {
+  auto r_out = lantern_index_put_tensor_constcliststdoptionaltensor_tensor_bool(self.get(), indices.get(), values.get(), accumulate.get());
 return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace_index_put_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate) {
-  auto r_out = lantern_index_put_tensor_constclistcoptionaltensor_tensor_bool(self.get(), indices.get(), values.get(), accumulate.get());
+XPtrTorchTensor cpp_torch_namespace__unsafe_index_put_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate) {
+  auto r_out = lantern__unsafe_index_put_tensor_constcliststdoptionaltensor_tensor_bool(self.get(), indices.get(), values.get(), accumulate.get());
 return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate, XPtrTorchbool unsafe) {
-  auto r_out = lantern__index_put_impl__tensor_constclistcoptionaltensor_tensor_bool_bool(self.get(), indices.get(), values.get(), accumulate.get(), unsafe.get());
+XPtrTorchTensor cpp_torch_namespace__index_put_impl__self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate, XPtrTorchbool unsafe) {
+  auto r_out = lantern__index_put_impl__tensor_constcliststdoptionaltensor_tensor_bool_bool(self.get(), indices.get(), values.get(), accumulate.get(), unsafe.get());
 return XPtrTorchTensor(r_out);
 }
 
@@ -7566,6 +7737,12 @@ auto wrap = XPtrTorchvector_void(r_out);
 return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)));
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_rms_norm_input_Tensor_normalized_shape_IntArrayRef (XPtrTorchTensor input, XPtrTorchIntArrayRef normalized_shape, XPtrTorchOptionalTensor weight, XPtrTorchOptionaldouble eps) {
+  auto r_out = lantern_rms_norm_tensor_intarrayref_tensor_double(input.get(), normalized_shape.get(), weight.get(), eps.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_nan_to_num_self_Tensor (XPtrTorchTensor self, XPtrTorchOptionaldouble nan, XPtrTorchOptionaldouble posinf, XPtrTorchOptionaldouble neginf) {
   auto r_out = lantern_nan_to_num_tensor_double_double_double(self.get(), nan.get(), posinf.get(), neginf.get());
@@ -7629,6 +7806,68 @@ auto wrap = XPtrTorchvector_void(r_out);
 return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)));
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__cslt_compress_input_Tensor (XPtrTorchTensor input) {
+  auto r_out = lantern__cslt_compress_tensor(input.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__cslt_sparse_mm_compressed_A_Tensor_dense_B_Tensor (XPtrTorchTensor compressed_A, XPtrTorchTensor dense_B, XPtrTorchOptionalTensor bias, XPtrTorchOptionalTensor alpha, XPtrTorchoptional_scalar_type out_dtype, XPtrTorchbool transpose_result, XPtrTorchint64_t alg_id) {
+  auto r_out = lantern__cslt_sparse_mm_tensor_tensor_tensor_tensor_scalartype_bool_intt(compressed_A.get(), dense_B.get(), bias.get(), alpha.get(), out_dtype.get(), transpose_result.get(), alg_id.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchint64_t cpp_torch_namespace__cslt_sparse_mm_search_compressed_A_Tensor_dense_B_Tensor (XPtrTorchTensor compressed_A, XPtrTorchTensor dense_B, XPtrTorchOptionalTensor bias, XPtrTorchOptionalTensor alpha, XPtrTorchoptional_scalar_type out_dtype, XPtrTorchbool transpose_result) {
+  auto r_out = lantern__cslt_sparse_mm_search_tensor_tensor_tensor_tensor_scalartype_bool(compressed_A.get(), dense_B.get(), bias.get(), alpha.get(), out_dtype.get(), transpose_result.get());
+return XPtrTorchint64_t(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+Rcpp::List cpp_torch_namespace__sparse_semi_structured_tile_input_Tensor (XPtrTorchTensor input, XPtrTorchstring_view algorithm, XPtrTorchbool use_cutlass) {
+  auto r_out = lantern__sparse_semi_structured_tile_tensor_cstringview_bool(input.get(), algorithm.get(), use_cutlass.get());
+auto wrap = XPtrTorchvector_void(r_out);
+return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 4)));
+}
+
+// [[Rcpp::export(rng=false)]]
+Rcpp::List cpp_torch_namespace__sparse_semi_structured_apply_input_Tensor_thread_masks_Tensor (XPtrTorchTensor input, XPtrTorchTensor thread_masks) {
+  auto r_out = lantern__sparse_semi_structured_apply_tensor_tensor(input.get(), thread_masks.get());
+auto wrap = XPtrTorchvector_void(r_out);
+return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)));
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__sparse_semi_structured_apply_dense_input_Tensor_thread_masks_Tensor (XPtrTorchTensor input, XPtrTorchTensor thread_masks) {
+  auto r_out = lantern__sparse_semi_structured_apply_dense_tensor_tensor(input.get(), thread_masks.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__sparse_semi_structured_linear_input_Tensor_weight_Tensor_meta_Tensor (XPtrTorchTensor input, XPtrTorchTensor weight, XPtrTorchTensor meta, XPtrTorchOptionalTensor bias, XPtrTorchoptional_string_view activation, XPtrTorchoptional_scalar_type out_dtype) {
+  auto r_out = lantern__sparse_semi_structured_linear_tensor_tensor_tensor_tensor_cstringview_scalartype(input.get(), weight.get(), meta.get(), bias.get(), activation.get(), out_dtype.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__sparse_semi_structured_mm_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor (XPtrTorchTensor mat1, XPtrTorchTensor mat1_meta, XPtrTorchTensor mat2, XPtrTorchoptional_scalar_type out_dtype) {
+  auto r_out = lantern__sparse_semi_structured_mm_tensor_tensor_tensor_scalartype(mat1.get(), mat1_meta.get(), mat2.get(), out_dtype.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__sparse_semi_structured_addmm_input_Tensor_mat1_Tensor_mat1_meta_Tensor_mat2_Tensor (XPtrTorchTensor input, XPtrTorchTensor mat1, XPtrTorchTensor mat1_meta, XPtrTorchTensor mat2, XPtrTorchScalar alpha, XPtrTorchScalar beta, XPtrTorchoptional_scalar_type out_dtype) {
+  auto r_out = lantern__sparse_semi_structured_addmm_tensor_tensor_tensor_tensor_scalar_scalar_scalartype(input.get(), mat1.get(), mat1_meta.get(), mat2.get(), alpha.get(), beta.get(), out_dtype.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__mixed_dtypes_linear_input_Tensor_weight_Tensor_scale_Tensor (XPtrTorchTensor input, XPtrTorchTensor weight, XPtrTorchTensor scale, XPtrTorchOptionalTensor bias, XPtrTorchoptional_string_view activation) {
+  auto r_out = lantern__mixed_dtypes_linear_tensor_tensor_tensor_tensor_cstringview(input.get(), weight.get(), scale.get(), bias.get(), activation.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_fbgemm_linear_int8_weight_fp32_activation_input_Tensor_weight_Tensor_packed_Tensor_col_offsets_Tensor_weight_scale_Scalar_weight_zero_point_Scalar_bias_Tensor (XPtrTorchTensor input, XPtrTorchTensor weight, XPtrTorchTensor packed, XPtrTorchTensor col_offsets, XPtrTorchScalar weight_scale, XPtrTorchScalar weight_zero_point, XPtrTorchTensor bias) {
   auto r_out = lantern_fbgemm_linear_int8_weight_fp32_activation_tensor_tensor_tensor_tensor_scalar_scalar_tensor(input.get(), weight.get(), packed.get(), col_offsets.get(), weight_scale.get(), weight_zero_point.get(), bias.get());
@@ -7654,6 +7893,18 @@ XPtrTorchTensor cpp_torch_namespace_fbgemm_pack_gemm_matrix_fp16_input_Tensor (X
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__wrapped_linear_prepack_weight_Tensor_weight_scale_Tensor_weight_zero_point_Tensor_bias_Tensor (XPtrTorchTensor weight, XPtrTorchTensor weight_scale, XPtrTorchTensor weight_zero_point, XPtrTorchTensor bias) {
+  auto r_out = lantern__wrapped_linear_prepack_tensor_tensor_tensor_tensor(weight.get(), weight_scale.get(), weight_zero_point.get(), bias.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__wrapped_quantized_linear_prepacked_input_Tensor_input_scale_Tensor_input_zero_point_Tensor_packed_weight_Tensor_output_scale_Tensor_output_zero_point_Tensor_out_channel_int64_t (XPtrTorchTensor input, XPtrTorchTensor input_scale, XPtrTorchTensor input_zero_point, XPtrTorchTensor packed_weight, XPtrTorchTensor output_scale, XPtrTorchTensor output_zero_point, XPtrTorchint64_t out_channel) {
+  auto r_out = lantern__wrapped_quantized_linear_prepacked_tensor_tensor_tensor_tensor_tensor_tensor_intt(input.get(), input_scale.get(), input_zero_point.get(), packed_weight.get(), output_scale.get(), output_zero_point.get(), out_channel.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_fbgemm_linear_fp16_weight_fp32_activation_input_Tensor_packed_weight_Tensor_bias_Tensor (XPtrTorchTensor input, XPtrTorchTensor packed_weight, XPtrTorchTensor bias) {
   auto r_out = lantern_fbgemm_linear_fp16_weight_fp32_activation_tensor_tensor_tensor(input.get(), packed_weight.get(), bias.get());
@@ -7702,12 +7953,48 @@ XPtrTorchTensor cpp_torch_namespace_linspace_start_Scalar_end_Scalar_steps_int64
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_linspace_start_Tensor_end_Tensor_steps_int64_t (XPtrTorchTensor start, XPtrTorchTensor end, XPtrTorchint64_t steps, XPtrTorchTensorOptions options) {
+  auto r_out = lantern_linspace_tensor_tensor_intt_tensoroptions(start.get(), end.get(), steps.get(), options.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_linspace_start_Tensor_end_Scalar_steps_int64_t (XPtrTorchTensor start, XPtrTorchScalar end, XPtrTorchint64_t steps, XPtrTorchTensorOptions options) {
+  auto r_out = lantern_linspace_tensor_scalar_intt_tensoroptions(start.get(), end.get(), steps.get(), options.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_linspace_start_Scalar_end_Tensor_steps_int64_t (XPtrTorchScalar start, XPtrTorchTensor end, XPtrTorchint64_t steps, XPtrTorchTensorOptions options) {
+  auto r_out = lantern_linspace_scalar_tensor_intt_tensoroptions(start.get(), end.get(), steps.get(), options.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t (XPtrTorchTensor out, XPtrTorchScalar start, XPtrTorchScalar end, XPtrTorchint64_t steps) {
   auto r_out = lantern_linspace_out_tensor_scalar_scalar_intt(out.get(), start.get(), end.get(), steps.get());
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t (XPtrTorchTensor out, XPtrTorchTensor start, XPtrTorchTensor end, XPtrTorchint64_t steps) {
+  auto r_out = lantern_linspace_out_tensor_tensor_tensor_intt(out.get(), start.get(), end.get(), steps.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_linspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t (XPtrTorchTensor out, XPtrTorchTensor start, XPtrTorchScalar end, XPtrTorchint64_t steps) {
+  auto r_out = lantern_linspace_out_tensor_tensor_scalar_intt(out.get(), start.get(), end.get(), steps.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_linspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t (XPtrTorchTensor out, XPtrTorchScalar start, XPtrTorchTensor end, XPtrTorchint64_t steps) {
+  auto r_out = lantern_linspace_out_tensor_scalar_tensor_intt(out.get(), start.get(), end.get(), steps.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_log_self_Tensor (XPtrTorchTensor self) {
   auto r_out = lantern_log_tensor(self.get());
@@ -7858,12 +8145,48 @@ XPtrTorchTensor cpp_torch_namespace_logspace_start_Scalar_end_Scalar_steps_int64
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_logspace_start_Tensor_end_Tensor_steps_int64_t (XPtrTorchTensor start, XPtrTorchTensor end, XPtrTorchint64_t steps, XPtrTorchdouble base, XPtrTorchTensorOptions options) {
+  auto r_out = lantern_logspace_tensor_tensor_intt_double_tensoroptions(start.get(), end.get(), steps.get(), base.get(), options.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_logspace_start_Tensor_end_Scalar_steps_int64_t (XPtrTorchTensor start, XPtrTorchScalar end, XPtrTorchint64_t steps, XPtrTorchdouble base, XPtrTorchTensorOptions options) {
+  auto r_out = lantern_logspace_tensor_scalar_intt_double_tensoroptions(start.get(), end.get(), steps.get(), base.get(), options.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_logspace_start_Scalar_end_Tensor_steps_int64_t (XPtrTorchScalar start, XPtrTorchTensor end, XPtrTorchint64_t steps, XPtrTorchdouble base, XPtrTorchTensorOptions options) {
+  auto r_out = lantern_logspace_scalar_tensor_intt_double_tensoroptions(start.get(), end.get(), steps.get(), base.get(), options.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Scalar_steps_int64_t (XPtrTorchTensor out, XPtrTorchScalar start, XPtrTorchScalar end, XPtrTorchint64_t steps, XPtrTorchdouble base) {
   auto r_out = lantern_logspace_out_tensor_scalar_scalar_intt_double(out.get(), start.get(), end.get(), steps.get(), base.get());
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Tensor_steps_int64_t (XPtrTorchTensor out, XPtrTorchTensor start, XPtrTorchTensor end, XPtrTorchint64_t steps, XPtrTorchdouble base) {
+  auto r_out = lantern_logspace_out_tensor_tensor_tensor_intt_double(out.get(), start.get(), end.get(), steps.get(), base.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_logspace_out_out_Tensor_start_Tensor_end_Scalar_steps_int64_t (XPtrTorchTensor out, XPtrTorchTensor start, XPtrTorchScalar end, XPtrTorchint64_t steps, XPtrTorchdouble base) {
+  auto r_out = lantern_logspace_out_tensor_tensor_scalar_intt_double(out.get(), start.get(), end.get(), steps.get(), base.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_logspace_out_out_Tensor_start_Scalar_end_Tensor_steps_int64_t (XPtrTorchTensor out, XPtrTorchScalar start, XPtrTorchTensor end, XPtrTorchint64_t steps, XPtrTorchdouble base) {
+  auto r_out = lantern_logspace_out_tensor_scalar_tensor_intt_double(out.get(), start.get(), end.get(), steps.get(), base.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_log_softmax_self_Tensor_dim_int64_t (XPtrTorchTensor self, XPtrTorchindex_int64_t dim, XPtrTorchoptional_scalar_type dtype) {
   auto r_out = lantern_log_softmax_tensor_intt_scalartype(self.get(), dim.get(), dtype.get());
@@ -8162,6 +8485,12 @@ XPtrTorchTensor cpp_torch_namespace_quantized_max_pool2d_self_Tensor_kernel_size
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_quantized_max_pool3d_self_Tensor_kernel_size_IntArrayRef (XPtrTorchTensor self, XPtrTorchIntArrayRef kernel_size, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef dilation, XPtrTorchbool ceil_mode) {
+  auto r_out = lantern_quantized_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(self.get(), kernel_size.get(), stride.get(), padding.get(), dilation.get(), ceil_mode.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_max_pool3d_self_Tensor_kernel_size_IntArrayRef (XPtrTorchTensor self, XPtrTorchIntArrayRef kernel_size, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef dilation, XPtrTorchbool ceil_mode) {
   auto r_out = lantern_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(self.get(), kernel_size.get(), stride.get(), padding.get(), dilation.get(), ceil_mode.get());
@@ -8174,6 +8503,12 @@ XPtrTorchTensor cpp_torch_namespace_mean_self_Tensor (XPtrTorchTensor self, XPtr
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_mean_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype) {
+  auto r_out = lantern_mean_out_tensor_tensor_scalartype(out.get(), self.get(), dtype.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_mean_self_Tensor_dim_IntArrayRef (XPtrTorchTensor self, XPtrTorchOptionalIndexIntArrayRef dim, XPtrTorchbool keepdim, XPtrTorchoptional_scalar_type dtype) {
   auto r_out = lantern_mean_tensor_intarrayref_bool_scalartype(self.get(), dim.get(), keepdim.get(), dtype.get());
@@ -8421,6 +8756,36 @@ XPtrTorchTensor cpp_torch_namespace_mm_out_out_Tensor_self_Tensor_mat2_Tensor (X
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__int_mm_self_Tensor_mat2_Tensor (XPtrTorchTensor self, XPtrTorchTensor mat2) {
+  auto r_out = lantern__int_mm_tensor_tensor(self.get(), mat2.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__int_mm_out_out_Tensor_self_Tensor_mat2_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor mat2) {
+  auto r_out = lantern__int_mm_out_tensor_tensor_tensor(out.get(), self.get(), mat2.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__convert_weight_to_int4pack_self_Tensor_innerKTiles_int64_t (XPtrTorchTensor self, XPtrTorchint64_t innerKTiles) {
+  auto r_out = lantern__convert_weight_to_int4pack_tensor_intt(self.get(), innerKTiles.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__weight_int4pack_mm_self_Tensor_mat2_Tensor_qGroupSize_int64_t_qScaleAndZeros_Tensor (XPtrTorchTensor self, XPtrTorchTensor mat2, XPtrTorchint64_t qGroupSize, XPtrTorchTensor qScaleAndZeros) {
+  auto r_out = lantern__weight_int4pack_mm_tensor_tensor_intt_tensor(self.get(), mat2.get(), qGroupSize.get(), qScaleAndZeros.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__weight_int8pack_mm_self_Tensor_mat2_Tensor_scales_Tensor (XPtrTorchTensor self, XPtrTorchTensor mat2, XPtrTorchTensor scales) {
+  auto r_out = lantern__weight_int8pack_mm_tensor_tensor_tensor(self.get(), mat2.get(), scales.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace__sparse_mm_sparse_Tensor_dense_Tensor (XPtrTorchTensor sparse, XPtrTorchTensor dense) {
   auto r_out = lantern__sparse_mm_tensor_tensor(sparse.get(), dense.get());
@@ -8572,6 +8937,13 @@ auto wrap = XPtrTorchvector_void(r_out);
 return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)));
 }
 
+// [[Rcpp::export(rng=false)]]
+Rcpp::List cpp_torch_namespace__native_batch_norm_legit_no_training_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double (XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchTensor running_mean, XPtrTorchTensor running_var, XPtrTorchdouble momentum, XPtrTorchdouble eps) {
+  auto r_out = lantern__native_batch_norm_legit_no_training_tensor_tensor_tensor_tensor_tensor_double_double(input.get(), weight.get(), bias.get(), running_mean.get(), running_var.get(), momentum.get(), eps.get());
+auto wrap = XPtrTorchvector_void(r_out);
+return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)));
+}
+
 // [[Rcpp::export(rng=false)]]
 Rcpp::List cpp_torch_namespace__native_batch_norm_legit_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_training_bool_momentum_double_eps_double (XPtrTorchTensor out, XPtrTorchTensor save_mean, XPtrTorchTensor save_invstd, XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchTensor running_mean, XPtrTorchTensor running_var, XPtrTorchbool training, XPtrTorchdouble momentum, XPtrTorchdouble eps) {
   auto r_out = lantern__native_batch_norm_legit_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double(out.get(), save_mean.get(), save_invstd.get(), input.get(), weight.get(), bias.get(), running_mean.get(), running_var.get(), training.get(), momentum.get(), eps.get());
@@ -8641,8 +9013,8 @@ return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPt
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor (XPtrTorchTensor grad_out, XPtrTorchTensor input, XPtrTorchTensor mean, XPtrTorchTensor invstd, XPtrTorchOptionalTensor weight, XPtrTorchTensor mean_dy, XPtrTorchTensor mean_dy_xmu, XPtrTorchTensor count) {
-  auto r_out = lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(grad_out.get(), input.get(), mean.get(), invstd.get(), weight.get(), mean_dy.get(), mean_dy_xmu.get(), count.get());
+XPtrTorchTensor cpp_torch_namespace_batch_norm_backward_elemt_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor (XPtrTorchTensor grad_out, XPtrTorchTensor input, XPtrTorchTensor mean, XPtrTorchTensor invstd, XPtrTorchOptionalTensor weight, XPtrTorchTensor sum_dy, XPtrTorchTensor sum_dy_xmu, XPtrTorchTensor count) {
+  auto r_out = lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(grad_out.get(), input.get(), mean.get(), invstd.get(), weight.get(), sum_dy.get(), sum_dy_xmu.get(), count.get());
 return XPtrTorchTensor(r_out);
 }
 
@@ -9536,6 +9908,12 @@ XPtrTorchTensor cpp_torch_namespace_slice_backward_grad_output_Tensor_input_size
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_slice_inverse_self_Tensor_src_Tensor (XPtrTorchTensor self, XPtrTorchTensor src, XPtrTorchindex_int64_t dim, XPtrTorchoptional_int64_t start, XPtrTorchoptional_int64_t end, XPtrTorchint64_t step) {
+  auto r_out = lantern_slice_inverse_tensor_tensor_intt_intt_intt_intt(self.get(), src.get(), dim.get(), start.get(), end.get(), step.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_slice_scatter_self_Tensor_src_Tensor (XPtrTorchTensor self, XPtrTorchTensor src, XPtrTorchindex_int64_t dim, XPtrTorchoptional_int64_t start, XPtrTorchoptional_int64_t end, XPtrTorchint64_t step) {
   auto r_out = lantern_slice_scatter_tensor_tensor_intt_intt_intt_intt(self.get(), src.get(), dim.get(), start.get(), end.get(), step.get());
@@ -9710,6 +10088,18 @@ XPtrTorchTensor cpp_torch_namespace_sspaddmm_out_out_Tensor_self_Tensor_mat1_Ten
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__chunk_cat_tensors_TensorList_dim_int64_t_num_chunks_int64_t (XPtrTorchTensorList tensors, XPtrTorchindex_int64_t dim, XPtrTorchint64_t num_chunks) {
+  auto r_out = lantern__chunk_cat_tensorlist_intt_intt(tensors.get(), dim.get(), num_chunks.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__chunk_cat_out_out_Tensor_tensors_TensorList_dim_int64_t_num_chunks_int64_t (XPtrTorchTensor out, XPtrTorchTensorList tensors, XPtrTorchindex_int64_t dim, XPtrTorchint64_t num_chunks) {
+  auto r_out = lantern__chunk_cat_out_tensor_tensorlist_intt_intt(out.get(), tensors.get(), dim.get(), num_chunks.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_stack_tensors_TensorList (XPtrTorchTensorList tensors, XPtrTorchindex_int64_t dim) {
   auto r_out = lantern_stack_tensorlist_intt(tensors.get(), dim.get());
@@ -10165,17 +10555,84 @@ return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef (XPtrTorchTensor self, XPtrTorchTensor nested_size, XPtrTorchTensor nested_strides, XPtrTorchIntArrayRef offsets) {
-  auto r_out = lantern__nested_view_from_buffer_tensor_tensor_tensor_intarrayref(self.get(), nested_size.get(), nested_strides.get(), offsets.get());
+XPtrTorchTensor cpp_torch_namespace__nested_view_from_buffer_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor (XPtrTorchTensor self, XPtrTorchTensor nested_size, XPtrTorchTensor nested_strides, XPtrTorchTensor offsets) {
+  auto r_out = lantern__nested_view_from_buffer_tensor_tensor_tensor_tensor(self.get(), nested_size.get(), nested_strides.get(), offsets.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor (XPtrTorchTensor self, XPtrTorchTensor nested_size, XPtrTorchTensor nested_strides, XPtrTorchTensor offsets) {
+  auto r_out = lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_tensor(self.get(), nested_size.get(), nested_strides.get(), offsets.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__nested_view_from_jagged_self_Tensor_offsets_Tensor_dummy_Tensor (XPtrTorchTensor self, XPtrTorchTensor offsets, XPtrTorchTensor dummy, XPtrTorchOptionalTensor lengths, XPtrTorchint64_t ragged_idx, XPtrTorchOptionalTensor min_seqlen, XPtrTorchOptionalTensor max_seqlen) {
+  auto r_out = lantern__nested_view_from_jagged_tensor_tensor_tensor_tensor_intt_tensor_tensor(self.get(), offsets.get(), dummy.get(), lengths.get(), ragged_idx.get(), min_seqlen.get(), max_seqlen.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__nested_view_from_jagged_copy_self_Tensor_offsets_Tensor_dummy_Tensor (XPtrTorchTensor self, XPtrTorchTensor offsets, XPtrTorchTensor dummy, XPtrTorchOptionalTensor lengths, XPtrTorchint64_t ragged_idx, XPtrTorchOptionalTensor min_seqlen, XPtrTorchOptionalTensor max_seqlen) {
+  auto r_out = lantern__nested_view_from_jagged_copy_tensor_tensor_tensor_tensor_intt_tensor_tensor(self.get(), offsets.get(), dummy.get(), lengths.get(), ragged_idx.get(), min_seqlen.get(), max_seqlen.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__nested_get_values_self_Tensor (XPtrTorchTensor self) {
+  auto r_out = lantern__nested_get_values_tensor(self.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__nested_get_values_copy_self_Tensor (XPtrTorchTensor self) {
+  auto r_out = lantern__nested_get_values_copy_tensor(self.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__nested_get_offsets_self_Tensor (XPtrTorchTensor self) {
+  auto r_out = lantern__nested_get_offsets_tensor(self.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__nested_get_lengths_self_Tensor (XPtrTorchTensor self) {
+  auto r_out = lantern__nested_get_lengths_tensor(self.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchint64_t cpp_torch_namespace__nested_get_ragged_idx_self_Tensor (XPtrTorchTensor self) {
+  auto r_out = lantern__nested_get_ragged_idx_tensor(self.get());
+return XPtrTorchint64_t(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__nested_get_min_seqlen_self_Tensor (XPtrTorchTensor self) {
+  auto r_out = lantern__nested_get_min_seqlen_tensor(self.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__nested_get_max_seqlen_self_Tensor (XPtrTorchTensor self) {
+  auto r_out = lantern__nested_get_max_seqlen_tensor(self.get());
 return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace__nested_view_from_buffer_copy_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef (XPtrTorchTensor self, XPtrTorchTensor nested_size, XPtrTorchTensor nested_strides, XPtrTorchIntArrayRef offsets) {
-  auto r_out = lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_intarrayref(self.get(), nested_size.get(), nested_strides.get(), offsets.get());
+XPtrTorchTensor cpp_torch_namespace__nested_get_jagged_dummy_any_Tensor (XPtrTorchTensor any) {
+  auto r_out = lantern__nested_get_jagged_dummy_tensor(any.get());
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+Rcpp::List cpp_torch_namespace__nested_compute_contiguous_strides_offsets_nested_size_Tensor (XPtrTorchTensor nested_size) {
+  auto r_out = lantern__nested_compute_contiguous_strides_offsets_tensor(nested_size.get());
+auto wrap = XPtrTorchvector_void(r_out);
+return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)));
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace__trilinear_i1_Tensor_i2_Tensor_i3_Tensor_expand1_IntArrayRef_expand2_IntArrayRef_expand3_IntArrayRef_sumdim_IntArrayRef (XPtrTorchTensor i1, XPtrTorchTensor i2, XPtrTorchTensor i3, XPtrTorchIntArrayRef expand1, XPtrTorchIntArrayRef expand2, XPtrTorchIntArrayRef expand3, XPtrTorchIntArrayRef sumdim, XPtrTorchint64_t unroll_dim) {
   auto r_out = lantern__trilinear_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt(i1.get(), i2.get(), i3.get(), expand1.get(), expand2.get(), expand3.get(), sumdim.get(), unroll_dim.get());
@@ -10481,6 +10938,34 @@ XPtrTorchTensor cpp_torch_namespace_native_norm_self_Tensor_p_Scalar_dim_IntArra
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+Rcpp::List cpp_torch_namespace__batch_norm_with_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double (XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchTensor running_mean, XPtrTorchTensor running_var, XPtrTorchdouble momentum, XPtrTorchdouble eps) {
+  auto r_out = lantern__batch_norm_with_update_tensor_tensor_tensor_tensor_tensor_double_double(input.get(), weight.get(), bias.get(), running_mean.get(), running_var.get(), momentum.get(), eps.get());
+auto wrap = XPtrTorchvector_void(r_out);
+return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3)));
+}
+
+// [[Rcpp::export(rng=false)]]
+Rcpp::List cpp_torch_namespace__batch_norm_with_update_out_out_Tensor_save_mean_Tensor_save_invstd_Tensor_reserve_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double (XPtrTorchTensor out, XPtrTorchTensor save_mean, XPtrTorchTensor save_invstd, XPtrTorchTensor reserve, XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchTensor running_mean, XPtrTorchTensor running_var, XPtrTorchdouble momentum, XPtrTorchdouble eps) {
+  auto r_out = lantern__batch_norm_with_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(out.get(), save_mean.get(), save_invstd.get(), reserve.get(), input.get(), weight.get(), bias.get(), running_mean.get(), running_var.get(), momentum.get(), eps.get());
+auto wrap = XPtrTorchvector_void(r_out);
+return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3)));
+}
+
+// [[Rcpp::export(rng=false)]]
+Rcpp::List cpp_torch_namespace__batch_norm_no_update_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double (XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchOptionalTensor running_mean, XPtrTorchOptionalTensor running_var, XPtrTorchdouble momentum, XPtrTorchdouble eps) {
+  auto r_out = lantern__batch_norm_no_update_tensor_tensor_tensor_tensor_tensor_double_double(input.get(), weight.get(), bias.get(), running_mean.get(), running_var.get(), momentum.get(), eps.get());
+auto wrap = XPtrTorchvector_void(r_out);
+return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3)));
+}
+
+// [[Rcpp::export(rng=false)]]
+Rcpp::List cpp_torch_namespace_batch_norm_backward_grad_out_Tensor_input_Tensor_weight_Tensor_running_mean_Tensor_running_var_Tensor_save_mean_Tensor_save_var_Tensor_update_bool_eps_double_output_mask_stdarraybool3_reserve_Tensor (XPtrTorchTensor grad_out, XPtrTorchTensor input, XPtrTorchTensor weight, XPtrTorchOptionalTensor running_mean, XPtrTorchOptionalTensor running_var, XPtrTorchOptionalTensor save_mean, XPtrTorchOptionalTensor save_var, XPtrTorchbool update, XPtrTorchdouble eps, std::vector<bool> output_mask, XPtrTorchTensor reserve) {
+  auto r_out = lantern_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool_tensor(grad_out.get(), input.get(), weight.get(), running_mean.get(), running_var.get(), save_mean.get(), save_var.get(), update.get(), eps.get(), reinterpret_cast<void*>(&output_mask), reserve.get());
+auto wrap = XPtrTorchvector_void(r_out);
+return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)));
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace__sparse_sum_self_Tensor (XPtrTorchTensor self) {
   auto r_out = lantern__sparse_sum_tensor(self.get());
@@ -10833,6 +11318,24 @@ XPtrTorchTensor cpp_torch_namespace__addmm_activation_self_Tensor_mat1_Tensor_ma
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__scaled_mm_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor (XPtrTorchTensor self, XPtrTorchTensor mat2, XPtrTorchTensor scale_a, XPtrTorchTensor scale_b, XPtrTorchOptionalTensor bias, XPtrTorchOptionalTensor scale_result, XPtrTorchoptional_scalar_type out_dtype, XPtrTorchbool use_fast_accum) {
+  auto r_out = lantern__scaled_mm_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool(self.get(), mat2.get(), scale_a.get(), scale_b.get(), bias.get(), scale_result.get(), out_dtype.get(), use_fast_accum.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__scaled_mm_out_out_Tensor_self_Tensor_mat2_Tensor_scale_a_Tensor_scale_b_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor mat2, XPtrTorchTensor scale_a, XPtrTorchTensor scale_b, XPtrTorchOptionalTensor bias, XPtrTorchOptionalTensor scale_result, XPtrTorchoptional_scalar_type out_dtype, XPtrTorchbool use_fast_accum) {
+  auto r_out = lantern__scaled_mm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool(out.get(), self.get(), mat2.get(), scale_a.get(), scale_b.get(), bias.get(), scale_result.get(), out_dtype.get(), use_fast_accum.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace__sparse_compressed_tensor_with_dims_nnz_int64_t_dense_dim_int64_t_size_IntArrayRef_blocksize_IntArrayRef_index_dtype_ScalarType_options_TensorOptions (XPtrTorchint64_t nnz, XPtrTorchint64_t dense_dim, XPtrTorchIntArrayRef size, XPtrTorchIntArrayRef blocksize, XPtrTorchDtype index_dtype, XPtrTorchTensorOptions options) {
+  auto r_out = lantern__sparse_compressed_tensor_with_dims_intt_intt_intarrayref_intarrayref_scalartype_tensoroptions(nnz.get(), dense_dim.get(), size.get(), blocksize.get(), index_dtype.get(), options.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_sparse_compressed_tensor_compressed_indices_Tensor_plain_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions (XPtrTorchTensor compressed_indices, XPtrTorchTensor plain_indices, XPtrTorchTensor values, XPtrTorchIntArrayRef size, XPtrTorchTensorOptions options) {
   auto r_out = lantern_sparse_compressed_tensor_tensor_tensor_tensor_intarrayref_tensoroptions(compressed_indices.get(), plain_indices.get(), values.get(), size.get(), options.get());
@@ -10930,26 +11433,26 @@ return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions (XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchTensorOptions options) {
-  auto r_out = lantern_sparse_coo_tensor_tensor_tensor_tensoroptions(indices.get(), values.get(), options.get());
+XPtrTorchTensor cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_options_TensorOptions (XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchTensorOptions options, XPtrTorchoptional_bool is_coalesced) {
+  auto r_out = lantern_sparse_coo_tensor_tensor_tensor_tensoroptions_bool(indices.get(), values.get(), options.get(), is_coalesced.get());
 return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions (XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchIntArrayRef size, XPtrTorchTensorOptions options) {
-  auto r_out = lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions(indices.get(), values.get(), size.get(), options.get());
+XPtrTorchTensor cpp_torch_namespace_sparse_coo_tensor_indices_Tensor_values_Tensor_size_IntArrayRef_options_TensorOptions (XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchIntArrayRef size, XPtrTorchTensorOptions options, XPtrTorchoptional_bool is_coalesced) {
+  auto r_out = lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions_bool(indices.get(), values.get(), size.get(), options.get(), is_coalesced.get());
 return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef (XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchIntArrayRef size, XPtrTorchTensorOptions options) {
-  auto r_out = lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions(indices.get(), values.get(), size.get(), options.get());
+XPtrTorchTensor cpp_torch_namespace__sparse_coo_tensor_unsafe_indices_Tensor_values_Tensor_size_IntArrayRef (XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchIntArrayRef size, XPtrTorchTensorOptions options, XPtrTorchoptional_bool is_coalesced) {
+  auto r_out = lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions_bool(indices.get(), values.get(), size.get(), options.get(), is_coalesced.get());
 return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef (XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchIntArrayRef size) {
-  lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref(indices.get(), values.get(), size.get());
+void cpp_torch_namespace__validate_sparse_coo_tensor_args_indices_Tensor_values_Tensor_size_IntArrayRef (XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchIntArrayRef size, XPtrTorchoptional_bool is_coalesced) {
+  lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref_bool(indices.get(), values.get(), size.get(), is_coalesced.get());
 }
 
 // [[Rcpp::export(rng=false)]]
@@ -10984,8 +11487,8 @@ return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions (XPtrTorchint64_t sparse_dim, XPtrTorchint64_t dense_dim, XPtrTorchIntArrayRef size, XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchTensorOptions options) {
-  auto r_out = lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions(sparse_dim.get(), dense_dim.get(), size.get(), indices.get(), values.get(), options.get());
+XPtrTorchTensor cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor_options_TensorOptions (XPtrTorchint64_t sparse_dim, XPtrTorchint64_t dense_dim, XPtrTorchIntArrayRef size, XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchTensorOptions options, XPtrTorchoptional_bool is_coalesced) {
+  auto r_out = lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions_bool(sparse_dim.get(), dense_dim.get(), size.get(), indices.get(), values.get(), options.get(), is_coalesced.get());
 return XPtrTorchTensor(r_out);
 }
 
@@ -10996,8 +11499,8 @@ return XPtrTorchTensorList(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor (XPtrTorchTensor grad, XPtrTorchTensor input) {
-  auto r_out = lantern_to_dense_backward_tensor_tensor(grad.get(), input.get());
+XPtrTorchTensor cpp_torch_namespace_to_dense_backward_grad_Tensor_input_Tensor (XPtrTorchTensor grad, XPtrTorchTensor input, XPtrTorchoptional_bool masked_grad) {
+  auto r_out = lantern_to_dense_backward_tensor_tensor_bool(grad.get(), input.get(), masked_grad.get());
 return XPtrTorchTensor(r_out);
 }
 
@@ -11037,6 +11540,13 @@ XPtrTorchTensorList cpp_torch_namespace_unbind_self_Tensor_dim_Dimname (XPtrTorc
 return XPtrTorchTensorList(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+Rcpp::List cpp_torch_namespace__to_sparse_semi_structured_dense_Tensor (XPtrTorchTensor dense) {
+  auto r_out = lantern__to_sparse_semi_structured_tensor(dense.get());
+auto wrap = XPtrTorchvector_void(r_out);
+return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)));
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_mkldnn_reorder_conv2d_weight_self_Tensor (XPtrTorchTensor self, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups, XPtrTorchOptionalIntArrayRef input_size) {
   auto r_out = lantern_mkldnn_reorder_conv2d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(self.get(), padding.get(), stride.get(), dilation.get(), groups.get(), input_size.get());
@@ -11044,8 +11554,8 @@ return XPtrTorchTensor(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor (XPtrTorchTensor self, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups) {
-  auto r_out = lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt(self.get(), padding.get(), stride.get(), dilation.get(), groups.get());
+XPtrTorchTensor cpp_torch_namespace_mkldnn_reorder_conv3d_weight_self_Tensor (XPtrTorchTensor self, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups, XPtrTorchOptionalIntArrayRef input_size) {
+  auto r_out = lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(self.get(), padding.get(), stride.get(), dilation.get(), groups.get(), input_size.get());
 return XPtrTorchTensor(r_out);
 }
 
@@ -11310,8 +11820,8 @@ return XPtrTorchScalarType(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchbool cpp_torch_namespace_can_cast_from_ScalarType_to_ScalarType (XPtrTorchDtype from, XPtrTorchDtype to) {
-  auto r_out = lantern_can_cast_scalartype_scalartype(from.get(), to.get());
+XPtrTorchbool cpp_torch_namespace_can_cast_from__ScalarType_to_ScalarType (XPtrTorchDtype from_, XPtrTorchDtype to) {
+  auto r_out = lantern_can_cast_scalartype_scalartype(from_.get(), to.get());
 return XPtrTorchbool(r_out);
 }
 
@@ -11335,7 +11845,7 @@ return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPt
 }
 
 // [[Rcpp::export(rng=false)]]
-Rcpp::List cpp_torch_namespace_lstm_mps_backward_grad_y_Tensor_grad_hy_Tensor_grad_cy_Tensor_z_state_Tensor_cell_state_fwd_Tensor_input_Tensor_layersOutputs_Tensor_hx_TensorList_params_TensorList_has_biases_bool_num_layers_int64_t_dropout_double_train_bool_bidirectional_bool_batch_first_bool (XPtrTorchTensor grad_y, XPtrTorchOptionalTensor grad_hy, XPtrTorchOptionalTensor grad_cy, XPtrTorchTensor z_state, XPtrTorchTensor cell_state_fwd, XPtrTorchTensor input, XPtrTorchTensor layersOutputs, XPtrTorchTensorList hx, XPtrTorchTensorList params, XPtrTorchbool has_biases, XPtrTorchint64_t num_layers, XPtrTorchdouble dropout, XPtrTorchbool train, XPtrTorchbool bidirectional, XPtrTorchbool batch_first) {
+Rcpp::List cpp_torch_namespace_lstm_mps_backward_grad_y_Tensor_grad_hy_Tensor_grad_cy_Tensor_z_state_Tensor_cell_state_fwd_Tensor_input_Tensor_layersOutputs_Tensor_hx_TensorList_params_TensorList_has_biases_bool_num_layers_int64_t_dropout_double_train_bool_bidirectional_bool_batch_first_bool (XPtrTorchOptionalTensor grad_y, XPtrTorchOptionalTensor grad_hy, XPtrTorchOptionalTensor grad_cy, XPtrTorchTensor z_state, XPtrTorchTensor cell_state_fwd, XPtrTorchTensor input, XPtrTorchTensor layersOutputs, XPtrTorchTensorList hx, XPtrTorchTensorList params, XPtrTorchbool has_biases, XPtrTorchint64_t num_layers, XPtrTorchdouble dropout, XPtrTorchbool train, XPtrTorchbool bidirectional, XPtrTorchbool batch_first) {
   auto r_out = lantern_lstm_mps_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensorlist_tensorlist_bool_intt_double_bool_bool_bool(grad_y.get(), grad_hy.get(), grad_cy.get(), z_state.get(), cell_state_fwd.get(), input.get(), layersOutputs.get(), hx.get(), params.get(), has_biases.get(), num_layers.get(), dropout.get(), train.get(), bidirectional.get(), batch_first.get());
 auto wrap = XPtrTorchvector_void(r_out);
 return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 2)));
@@ -11552,6 +12062,12 @@ XPtrTorchTensor cpp_torch_namespace_masked_scatter_self_Tensor_mask_Tensor_sourc
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_masked_scatter_backward_grad_output_Tensor_mask_Tensor_sizes_IntArrayRef (XPtrTorchTensor grad_output, XPtrTorchTensor mask, XPtrTorchIntArrayRef sizes) {
+  auto r_out = lantern_masked_scatter_backward_tensor_tensor_intarrayref(grad_output.get(), mask.get(), sizes.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace__masked_softmax_self_Tensor_mask_Tensor (XPtrTorchTensor self, XPtrTorchTensor mask, XPtrTorchoptional_index_int64_t dim, XPtrTorchoptional_int64_t mask_type) {
   auto r_out = lantern__masked_softmax_tensor_tensor_intt_intt(self.get(), mask.get(), dim.get(), mask_type.get());
@@ -12356,6 +12872,18 @@ XPtrTorchTensor cpp_torch_namespace_nonzero_self_Tensor (XPtrTorchTensor self) {
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_nonzero_static_out_out_Tensor_self_Tensor_size_int64_t (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchint64_t size, XPtrTorchint64_t fill_value) {
+  auto r_out = lantern_nonzero_static_out_tensor_tensor_intt_intt(out.get(), self.get(), size.get(), fill_value.get());
+return XPtrTorchTensor(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_nonzero_static_self_Tensor_size_int64_t (XPtrTorchTensor self, XPtrTorchint64_t size, XPtrTorchint64_t fill_value) {
+  auto r_out = lantern_nonzero_static_tensor_intt_intt(self.get(), size.get(), fill_value.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensorList cpp_torch_namespace_nonzero_numpy_self_Tensor (XPtrTorchTensor self) {
   auto r_out = lantern_nonzero_numpy_tensor(self.get());
@@ -12961,6 +13489,12 @@ XPtrTorchTensor cpp_torch_namespace_min_self_Tensor (XPtrTorchTensor self) {
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_min_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self) {
+  auto r_out = lantern_min_out_tensor_tensor(out.get(), self.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_fmin_self_Tensor_other_Tensor (XPtrTorchTensor self, XPtrTorchTensor other) {
   auto r_out = lantern_fmin_tensor_tensor(self.get(), other.get());
@@ -13173,6 +13707,12 @@ XPtrTorchTensor cpp_torch_namespace_argsort_self_Tensor_stable_bool_dim_int64_t
 return XPtrTorchTensor(r_out);
 }
 
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensor cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchbool stable, XPtrTorchindex_int64_t dim, XPtrTorchbool descending) {
+  auto r_out = lantern_argsort_out_tensor_tensor_bool_intt_bool(out.get(), self.get(), stable.get(), dim.get(), descending.get());
+return XPtrTorchTensor(r_out);
+}
+
 // [[Rcpp::export(rng=false)]]
 XPtrTorchTensor cpp_torch_namespace_argsort_self_Tensor_dim_Dimname (XPtrTorchTensor self, XPtrTorchDimname dim, XPtrTorchbool descending) {
   auto r_out = lantern_argsort_tensor_dimname_bool(self.get(), dim.get(), descending.get());
@@ -13372,113 +13912,124 @@ void cpp_torch_namespace__foreach_add__self_TensorList_scalar_Scalar (XPtrTorchT
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensorList cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
-  auto r_out = lantern__foreach_sub_tensorlist_scalar(self.get(), scalar.get());
+XPtrTorchTensorList cpp_torch_namespace__foreach_add_self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha) {
+  auto r_out = lantern__foreach_add_tensorlist_tensorlist_scalar(self.get(), other.get(), alpha.get());
 return XPtrTorchTensorList(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
-  lantern__foreach_sub__tensorlist_scalar(self.get(), scalar.get());
+void cpp_torch_namespace__foreach_add__self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha) {
+  lantern__foreach_add__tensorlist_tensorlist_scalar(self.get(), other.get(), alpha.get());
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensorList cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
-  auto r_out = lantern__foreach_mul_tensorlist_scalar(self.get(), scalar.get());
+XPtrTorchTensorList cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
+  auto r_out = lantern__foreach_add_tensorlist_arrayrefscalar(self.get(), scalars.get());
 return XPtrTorchTensorList(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
-  lantern__foreach_mul__tensorlist_scalar(self.get(), scalar.get());
+void cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
+  lantern__foreach_add__tensorlist_arrayrefscalar(self.get(), scalars.get());
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensorList cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
-  auto r_out = lantern__foreach_div_tensorlist_scalar(self.get(), scalar.get());
+XPtrTorchTensorList cpp_torch_namespace__foreach_add_self_TensorList_other_Tensor (XPtrTorchTensorList self, XPtrTorchTensor other, XPtrTorchScalar alpha) {
+  auto r_out = lantern__foreach_add_tensorlist_tensor_scalar(self.get(), other.get(), alpha.get());
 return XPtrTorchTensorList(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
-  lantern__foreach_div__tensorlist_scalar(self.get(), scalar.get());
+void cpp_torch_namespace__foreach_add__self_TensorList_other_Tensor (XPtrTorchTensorList self, XPtrTorchTensor other, XPtrTorchScalar alpha) {
+  lantern__foreach_add__tensorlist_tensor_scalar(self.get(), other.get(), alpha.get());
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
-  auto r_out = lantern__foreach_clamp_min_tensorlist_scalar(self.get(), scalar.get());
+XPtrTorchTensorList cpp_torch_namespace__foreach_sub_self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
+  auto r_out = lantern__foreach_sub_tensorlist_scalar(self.get(), scalar.get());
 return XPtrTorchTensorList(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
-  lantern__foreach_clamp_min__tensorlist_scalar(self.get(), scalar.get());
+void cpp_torch_namespace__foreach_sub__self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
+  lantern__foreach_sub__tensorlist_scalar(self.get(), scalar.get());
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
-  auto r_out = lantern__foreach_clamp_max_tensorlist_scalar(self.get(), scalar.get());
+XPtrTorchTensorList cpp_torch_namespace__foreach_sub_self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha) {
+  auto r_out = lantern__foreach_sub_tensorlist_tensorlist_scalar(self.get(), other.get(), alpha.get());
 return XPtrTorchTensorList(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
-  lantern__foreach_clamp_max__tensorlist_scalar(self.get(), scalar.get());
+void cpp_torch_namespace__foreach_sub__self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha) {
+  lantern__foreach_sub__tensorlist_tensorlist_scalar(self.get(), other.get(), alpha.get());
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensorList cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
-  auto r_out = lantern__foreach_maximum_tensorlist_scalar(self.get(), scalar.get());
+XPtrTorchTensorList cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
+  auto r_out = lantern__foreach_sub_tensorlist_arrayrefscalar(self.get(), scalars.get());
 return XPtrTorchTensorList(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
-  lantern__foreach_maximum__tensorlist_scalar(self.get(), scalar.get());
+void cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
+  lantern__foreach_sub__tensorlist_arrayrefscalar(self.get(), scalars.get());
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensorList cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
-  auto r_out = lantern__foreach_minimum_tensorlist_scalar(self.get(), scalar.get());
+XPtrTorchTensorList cpp_torch_namespace__foreach_mul_self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
+  auto r_out = lantern__foreach_mul_tensorlist_scalar(self.get(), scalar.get());
 return XPtrTorchTensorList(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
-  lantern__foreach_minimum__tensorlist_scalar(self.get(), scalar.get());
+void cpp_torch_namespace__foreach_mul__self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
+  lantern__foreach_mul__tensorlist_scalar(self.get(), scalar.get());
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensorList cpp_torch_namespace__foreach_add_self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha) {
-  auto r_out = lantern__foreach_add_tensorlist_tensorlist_scalar(self.get(), other.get(), alpha.get());
+XPtrTorchTensorList cpp_torch_namespace__foreach_mul_self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) {
+  auto r_out = lantern__foreach_mul_tensorlist_tensorlist(self.get(), other.get());
 return XPtrTorchTensorList(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_add__self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha) {
-  lantern__foreach_add__tensorlist_tensorlist_scalar(self.get(), other.get(), alpha.get());
+void cpp_torch_namespace__foreach_mul__self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) {
+  lantern__foreach_mul__tensorlist_tensorlist(self.get(), other.get());
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensorList cpp_torch_namespace__foreach_sub_self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha) {
-  auto r_out = lantern__foreach_sub_tensorlist_tensorlist_scalar(self.get(), other.get(), alpha.get());
+XPtrTorchTensorList cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
+  auto r_out = lantern__foreach_mul_tensorlist_arrayrefscalar(self.get(), scalars.get());
 return XPtrTorchTensorList(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_sub__self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha) {
-  lantern__foreach_sub__tensorlist_tensorlist_scalar(self.get(), other.get(), alpha.get());
+void cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
+  lantern__foreach_mul__tensorlist_arrayrefscalar(self.get(), scalars.get());
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensorList cpp_torch_namespace__foreach_mul_self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) {
-  auto r_out = lantern__foreach_mul_tensorlist_tensorlist(self.get(), other.get());
+XPtrTorchTensorList cpp_torch_namespace__foreach_mul_self_TensorList_other_Tensor (XPtrTorchTensorList self, XPtrTorchTensor other) {
+  auto r_out = lantern__foreach_mul_tensorlist_tensor(self.get(), other.get());
 return XPtrTorchTensorList(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_mul__self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) {
-  lantern__foreach_mul__tensorlist_tensorlist(self.get(), other.get());
+void cpp_torch_namespace__foreach_mul__self_TensorList_other_Tensor (XPtrTorchTensorList self, XPtrTorchTensor other) {
+  lantern__foreach_mul__tensorlist_tensor(self.get(), other.get());
+}
+
+// [[Rcpp::export(rng=false)]]
+XPtrTorchTensorList cpp_torch_namespace__foreach_div_self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
+  auto r_out = lantern__foreach_div_tensorlist_scalar(self.get(), scalar.get());
+return XPtrTorchTensorList(r_out);
+}
+
+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__foreach_div__self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) {
+  lantern__foreach_div__tensorlist_scalar(self.get(), scalar.get());
 }
 
 // [[Rcpp::export(rng=false)]]
@@ -13493,113 +14044,113 @@ void cpp_torch_namespace__foreach_div__self_TensorList_other_TensorList (XPtrTor
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) {
-  auto r_out = lantern__foreach_clamp_min_tensorlist_tensorlist(self.get(), other.get());
+XPtrTorchTensorList cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
+  auto r_out = lantern__foreach_div_tensorlist_arrayrefscalar(self.get(), scalars.get());
 return XPtrTorchTensorList(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) {
-  lantern__foreach_clamp_min__tensorlist_tensorlist(self.get(), other.get());
+void cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
+  lantern__foreach_div__tensorlist_arrayrefscalar(self.get(), scalars.get());
 }
 
 // [[Rcpp::export(rng=false)]]
-XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) {
-  auto r_out = lantern__foreach_clamp_max_tensorlist_tensorlist(self.get(), other.get());
+XPtrTorchTensorList cpp_torch_namespace__foreach_div_self_TensorList_other_Tensor (XPtrTorchTensorList self, XPtrTorchTensor other) {
+  auto r_out = lantern__foreach_div_tensorlist_tensor(self.get(), other.get());
 return XPtrTorchTensorList(r_out);
 }
 
 // [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList
(XPtrTorchTensorList self, XPtrTorchTensorList other) { - lantern__foreach_clamp_max__tensorlist_tensorlist(self.get(), other.get()); +void cpp_torch_namespace__foreach_div__self_TensorList_other_Tensor (XPtrTorchTensorList self, XPtrTorchTensor other) { + lantern__foreach_div__tensorlist_tensor(self.get(), other.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) { - auto r_out = lantern__foreach_maximum_tensorlist_tensorlist(self.get(), other.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) { + auto r_out = lantern__foreach_clamp_max_tensorlist_scalar(self.get(), scalar.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) { - lantern__foreach_maximum__tensorlist_tensorlist(self.get(), other.get()); +void cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) { + lantern__foreach_clamp_max__tensorlist_scalar(self.get(), scalar.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) { - auto r_out = lantern__foreach_minimum_tensorlist_tensorlist(self.get(), other.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_max_self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) { + auto r_out = lantern__foreach_clamp_max_tensorlist_tensorlist(self.get(), other.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) { - lantern__foreach_minimum__tensorlist_tensorlist(self.get(), other.get()); +void cpp_torch_namespace__foreach_clamp_max__self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) { + lantern__foreach_clamp_max__tensorlist_tensorlist(self.get(), other.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_add_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { - auto r_out = lantern__foreach_add_tensorlist_arrayrefscalar(self.get(), scalars.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { + auto r_out = lantern__foreach_clamp_max_tensorlist_arrayrefscalar(self.get(), scalars.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_add__self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { - lantern__foreach_add__tensorlist_arrayrefscalar(self.get(), scalars.get()); +void cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { + lantern__foreach_clamp_max__tensorlist_arrayrefscalar(self.get(), scalars.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_sub_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { - auto r_out = 
lantern__foreach_sub_tensorlist_arrayrefscalar(self.get(), scalars.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) { + auto r_out = lantern__foreach_clamp_min_tensorlist_scalar(self.get(), scalar.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_sub__self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { - lantern__foreach_sub__tensorlist_arrayrefscalar(self.get(), scalars.get()); +void cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) { + lantern__foreach_clamp_min__tensorlist_scalar(self.get(), scalar.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_div_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { - auto r_out = lantern__foreach_div_tensorlist_arrayrefscalar(self.get(), scalars.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_min_self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) { + auto r_out = lantern__foreach_clamp_min_tensorlist_tensorlist(self.get(), other.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_div__self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { - lantern__foreach_div__tensorlist_arrayrefscalar(self.get(), scalars.get()); +void cpp_torch_namespace__foreach_clamp_min__self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) { + lantern__foreach_clamp_min__tensorlist_tensorlist(self.get(), other.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_mul_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { - auto r_out = lantern__foreach_mul_tensorlist_arrayrefscalar(self.get(), scalars.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { + auto r_out = lantern__foreach_clamp_min_tensorlist_arrayrefscalar(self.get(), scalars.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_mul__self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { - lantern__foreach_mul__tensorlist_arrayrefscalar(self.get(), scalars.get()); +void cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { + lantern__foreach_clamp_min__tensorlist_arrayrefscalar(self.get(), scalars.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_min_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { - auto r_out = lantern__foreach_clamp_min_tensorlist_arrayrefscalar(self.get(), scalars.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_maximum_self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) { + auto r_out = lantern__foreach_maximum_tensorlist_scalar(self.get(), scalar.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_clamp_min__self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, 
XPtrTorchvector_Scalar scalars) { - lantern__foreach_clamp_min__tensorlist_arrayrefscalar(self.get(), scalars.get()); +void cpp_torch_namespace__foreach_maximum__self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) { + lantern__foreach_maximum__tensorlist_scalar(self.get(), scalar.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_clamp_max_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { - auto r_out = lantern__foreach_clamp_max_tensorlist_arrayrefscalar(self.get(), scalars.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_maximum_self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) { + auto r_out = lantern__foreach_maximum_tensorlist_tensorlist(self.get(), other.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_clamp_max__self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { - lantern__foreach_clamp_max__tensorlist_arrayrefscalar(self.get(), scalars.get()); +void cpp_torch_namespace__foreach_maximum__self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) { + lantern__foreach_maximum__tensorlist_tensorlist(self.get(), other.get()); } // [[Rcpp::export(rng=false)]] @@ -13613,6 +14164,28 @@ void cpp_torch_namespace__foreach_maximum__self_TensorList_scalars_ArrayRefScala lantern__foreach_maximum__tensorlist_arrayrefscalar(self.get(), scalars.get()); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensorList cpp_torch_namespace__foreach_minimum_self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) { + auto r_out = lantern__foreach_minimum_tensorlist_scalar(self.get(), scalar.get()); +return XPtrTorchTensorList(r_out); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__foreach_minimum__self_TensorList_scalar_Scalar (XPtrTorchTensorList self, XPtrTorchScalar scalar) { + lantern__foreach_minimum__tensorlist_scalar(self.get(), scalar.get()); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensorList cpp_torch_namespace__foreach_minimum_self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) { + auto r_out = lantern__foreach_minimum_tensorlist_tensorlist(self.get(), other.get()); +return XPtrTorchTensorList(r_out); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__foreach_minimum__self_TensorList_other_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList other) { + lantern__foreach_minimum__tensorlist_tensorlist(self.get(), other.get()); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensorList cpp_torch_namespace__foreach_minimum_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) { auto r_out = lantern__foreach_minimum_tensorlist_arrayrefscalar(self.get(), scalars.get()); @@ -13625,30 +14198,69 @@ void cpp_torch_namespace__foreach_minimum__self_TensorList_scalars_ArrayRefScala } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_exp_self_TensorList (XPtrTorchTensorList self) { - auto r_out = lantern__foreach_exp_tensorlist(self.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value) { + auto r_out = lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar(self.get(), 
tensor1.get(), tensor2.get(), value.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_zero__self_TensorList (XPtrTorchTensorList self) { - lantern__foreach_zero__tensorlist(self.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars) { + auto r_out = lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar(self.get(), tensor1.get(), tensor2.get(), scalars.get()); +return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_exp__self_TensorList (XPtrTorchTensorList self) { - lantern__foreach_exp__tensorlist(self.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars) { + auto r_out = lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor(self.get(), tensor1.get(), tensor2.get(), scalars.get()); +return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_sqrt_self_TensorList (XPtrTorchTensorList self) { - auto r_out = lantern__foreach_sqrt_tensorlist(self.get()); +void cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value) { + lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar(self.get(), tensor1.get(), tensor2.get(), value.get()); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars) { + lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar(self.get(), tensor1.get(), tensor2.get(), scalars.get()); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars) { + lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor(self.get(), tensor1.get(), tensor2.get(), scalars.get()); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensorList cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value) { + auto r_out = lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar(self.get(), tensor1.get(), tensor2.get(), value.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_sqrt__self_TensorList (XPtrTorchTensorList self) { - lantern__foreach_sqrt__tensorlist(self.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars) { + auto r_out = lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar(self.get(), tensor1.get(), 
tensor2.get(), scalars.get()); +return XPtrTorchTensorList(r_out); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensorList cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars) { + auto r_out = lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor(self.get(), tensor1.get(), tensor2.get(), scalars.get()); +return XPtrTorchTensorList(r_out); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value) { + lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar(self.get(), tensor1.get(), tensor2.get(), value.get()); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars) { + lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar(self.get(), tensor1.get(), tensor2.get(), scalars.get()); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars) { + lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor(self.get(), tensor1.get(), tensor2.get(), scalars.get()); } // [[Rcpp::export(rng=false)]] @@ -13750,6 +14362,17 @@ void cpp_torch_namespace__foreach_erfc__self_TensorList (XPtrTorchTensorList sel lantern__foreach_erfc__tensorlist(self.get()); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensorList cpp_torch_namespace__foreach_exp_self_TensorList (XPtrTorchTensorList self) { + auto r_out = lantern__foreach_exp_tensorlist(self.get()); +return XPtrTorchTensorList(r_out); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__foreach_exp__self_TensorList (XPtrTorchTensorList self) { + lantern__foreach_exp__tensorlist(self.get()); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensorList cpp_torch_namespace__foreach_expm1_self_TensorList (XPtrTorchTensorList self) { auto r_out = lantern__foreach_expm1_tensorlist(self.get()); @@ -13772,6 +14395,50 @@ void cpp_torch_namespace__foreach_floor__self_TensorList (XPtrTorchTensorList se lantern__foreach_floor__tensorlist(self.get()); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensorList cpp_torch_namespace__foreach_frac_self_TensorList (XPtrTorchTensorList self) { + auto r_out = lantern__foreach_frac_tensorlist(self.get()); +return XPtrTorchTensorList(r_out); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__foreach_frac__self_TensorList (XPtrTorchTensorList self) { + lantern__foreach_frac__tensorlist(self.get()); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensorList cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchTensorList weights) { + auto r_out = lantern__foreach_lerp_tensorlist_tensorlist_tensorlist(self.get(), tensors1.get(), weights.get()); +return XPtrTorchTensorList(r_out); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList 
(XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchTensorList weights) { + lantern__foreach_lerp__tensorlist_tensorlist_tensorlist(self.get(), tensors1.get(), weights.get()); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensorList cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar (XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchScalar weight) { + auto r_out = lantern__foreach_lerp_tensorlist_tensorlist_scalar(self.get(), tensors1.get(), weight.get()); +return XPtrTorchTensorList(r_out); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar (XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchScalar weight) { + lantern__foreach_lerp__tensorlist_tensorlist_scalar(self.get(), tensors1.get(), weight.get()); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensorList cpp_torch_namespace__foreach_lgamma_self_TensorList (XPtrTorchTensorList self) { + auto r_out = lantern__foreach_lgamma_tensorlist(self.get()); +return XPtrTorchTensorList(r_out); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__foreach_lgamma__self_TensorList (XPtrTorchTensorList self) { + lantern__foreach_lgamma__tensorlist(self.get()); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensorList cpp_torch_namespace__foreach_log_self_TensorList (XPtrTorchTensorList self) { auto r_out = lantern__foreach_log_tensorlist(self.get()); @@ -13816,6 +14483,12 @@ void cpp_torch_namespace__foreach_log2__self_TensorList (XPtrTorchTensorList sel lantern__foreach_log2__tensorlist(self.get()); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensorList cpp_torch_namespace__foreach_max_self_TensorList (XPtrTorchTensorList self) { + auto r_out = lantern__foreach_max_tensorlist(self.get()); +return XPtrTorchTensorList(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensorList cpp_torch_namespace__foreach_neg_self_TensorList (XPtrTorchTensorList self) { auto r_out = lantern__foreach_neg_tensorlist(self.get()); @@ -13828,91 +14501,70 @@ void cpp_torch_namespace__foreach_neg__self_TensorList (XPtrTorchTensorList self } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_tan_self_TensorList (XPtrTorchTensorList self) { - auto r_out = lantern__foreach_tan_tensorlist(self.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_norm_self_TensorList (XPtrTorchTensorList self, XPtrTorchScalar ord, XPtrTorchoptional_scalar_type dtype) { + auto r_out = lantern__foreach_norm_tensorlist_scalar_scalartype(self.get(), ord.get(), dtype.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_tan__self_TensorList (XPtrTorchTensorList self) { - lantern__foreach_tan__tensorlist(self.get()); -} - -// [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_tanh_self_TensorList (XPtrTorchTensorList self) { - auto r_out = lantern__foreach_tanh_tensorlist(self.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_pow_self_TensorList_exponent_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList exponent) { + auto r_out = lantern__foreach_pow_tensorlist_tensorlist(self.get(), exponent.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_tanh__self_TensorList (XPtrTorchTensorList self) { - lantern__foreach_tanh__tensorlist(self.get()); -} - -// [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_sin_self_TensorList 
(XPtrTorchTensorList self) { - auto r_out = lantern__foreach_sin_tensorlist(self.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_pow_self_TensorList_exponent_Scalar (XPtrTorchTensorList self, XPtrTorchScalar exponent) { + auto r_out = lantern__foreach_pow_tensorlist_scalar(self.get(), exponent.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_sin__self_TensorList (XPtrTorchTensorList self) { - lantern__foreach_sin__tensorlist(self.get()); -} - -// [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_sinh_self_TensorList (XPtrTorchTensorList self) { - auto r_out = lantern__foreach_sinh_tensorlist(self.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_pow_self_TensorList_exponent_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar exponent) { + auto r_out = lantern__foreach_pow_tensorlist_arrayrefscalar(self.get(), exponent.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_sinh__self_TensorList (XPtrTorchTensorList self) { - lantern__foreach_sinh__tensorlist(self.get()); -} - -// [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_round_self_TensorList (XPtrTorchTensorList self) { - auto r_out = lantern__foreach_round_tensorlist(self.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_pow_self_Scalar_exponent_TensorList (XPtrTorchScalar self, XPtrTorchTensorList exponent) { + auto r_out = lantern__foreach_pow_scalar_tensorlist(self.get(), exponent.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_round__self_TensorList (XPtrTorchTensorList self) { - lantern__foreach_round__tensorlist(self.get()); +void cpp_torch_namespace__foreach_pow__self_TensorList_exponent_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList exponent) { + lantern__foreach_pow__tensorlist_tensorlist(self.get(), exponent.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_lgamma_self_TensorList (XPtrTorchTensorList self) { - auto r_out = lantern__foreach_lgamma_tensorlist(self.get()); -return XPtrTorchTensorList(r_out); +void cpp_torch_namespace__foreach_pow__self_TensorList_exponent_Scalar (XPtrTorchTensorList self, XPtrTorchScalar exponent) { + lantern__foreach_pow__tensorlist_scalar(self.get(), exponent.get()); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_lgamma__self_TensorList (XPtrTorchTensorList self) { - lantern__foreach_lgamma__tensorlist(self.get()); +void cpp_torch_namespace__foreach_pow__self_TensorList_exponent_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchvector_Scalar exponent) { + lantern__foreach_pow__tensorlist_arrayrefscalar(self.get(), exponent.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_frac_self_TensorList (XPtrTorchTensorList self) { - auto r_out = lantern__foreach_frac_tensorlist(self.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_reciprocal_self_TensorList (XPtrTorchTensorList self) { + auto r_out = lantern__foreach_reciprocal_tensorlist(self.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_frac__self_TensorList (XPtrTorchTensorList self) { - lantern__foreach_frac__tensorlist(self.get()); +void cpp_torch_namespace__foreach_reciprocal__self_TensorList (XPtrTorchTensorList self) { + lantern__foreach_reciprocal__tensorlist(self.get()); } // 
[[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_reciprocal_self_TensorList (XPtrTorchTensorList self) { - auto r_out = lantern__foreach_reciprocal_tensorlist(self.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_round_self_TensorList (XPtrTorchTensorList self) { + auto r_out = lantern__foreach_round_tensorlist(self.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_reciprocal__self_TensorList (XPtrTorchTensorList self) { - lantern__foreach_reciprocal__tensorlist(self.get()); +void cpp_torch_namespace__foreach_round__self_TensorList (XPtrTorchTensorList self) { + lantern__foreach_round__tensorlist(self.get()); } // [[Rcpp::export(rng=false)]] @@ -13927,108 +14579,96 @@ void cpp_torch_namespace__foreach_sigmoid__self_TensorList (XPtrTorchTensorList } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_trunc_self_TensorList (XPtrTorchTensorList self) { - auto r_out = lantern__foreach_trunc_tensorlist(self.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_sign_self_TensorList (XPtrTorchTensorList self) { + auto r_out = lantern__foreach_sign_tensorlist(self.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_trunc__self_TensorList (XPtrTorchTensorList self) { - lantern__foreach_trunc__tensorlist(self.get()); -} - -// [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value) { - lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar(self.get(), tensor1.get(), tensor2.get(), value.get()); -} - -// [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value) { - lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar(self.get(), tensor1.get(), tensor2.get(), value.get()); +void cpp_torch_namespace__foreach_sign__self_TensorList (XPtrTorchTensorList self) { + lantern__foreach_sign__tensorlist(self.get()); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars) { - lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar(self.get(), tensor1.get(), tensor2.get(), scalars.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_sin_self_TensorList (XPtrTorchTensorList self) { + auto r_out = lantern__foreach_sin_tensorlist(self.get()); +return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_addcdiv__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars) { - lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor(self.get(), tensor1.get(), tensor2.get(), scalars.get()); +void cpp_torch_namespace__foreach_sin__self_TensorList (XPtrTorchTensorList self) { + lantern__foreach_sin__tensorlist(self.get()); } // [[Rcpp::export(rng=false)]] -void 
cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars) { - lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar(self.get(), tensor1.get(), tensor2.get(), scalars.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_sinh_self_TensorList (XPtrTorchTensorList self) { + auto r_out = lantern__foreach_sinh_tensorlist(self.get()); +return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_addcmul__self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars) { - lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor(self.get(), tensor1.get(), tensor2.get(), scalars.get()); +void cpp_torch_namespace__foreach_sinh__self_TensorList (XPtrTorchTensorList self) { + lantern__foreach_sinh__tensorlist(self.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value) { - auto r_out = lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar(self.get(), tensor1.get(), tensor2.get(), value.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_sqrt_self_TensorList (XPtrTorchTensorList self) { + auto r_out = lantern__foreach_sqrt_tensorlist(self.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value) { - auto r_out = lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar(self.get(), tensor1.get(), tensor2.get(), value.get()); -return XPtrTorchTensorList(r_out); +void cpp_torch_namespace__foreach_sqrt__self_TensorList (XPtrTorchTensorList self) { + lantern__foreach_sqrt__tensorlist(self.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars) { - auto r_out = lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar(self.get(), tensor1.get(), tensor2.get(), scalars.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_tan_self_TensorList (XPtrTorchTensorList self) { + auto r_out = lantern__foreach_tan_tensorlist(self.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_addcdiv_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars) { - auto r_out = lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor(self.get(), tensor1.get(), tensor2.get(), scalars.get()); -return XPtrTorchTensorList(r_out); +void cpp_torch_namespace__foreach_tan__self_TensorList (XPtrTorchTensorList self) { + lantern__foreach_tan__tensorlist(self.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList 
cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars) { - auto r_out = lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar(self.get(), tensor1.get(), tensor2.get(), scalars.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_tanh_self_TensorList (XPtrTorchTensorList self) { + auto r_out = lantern__foreach_tanh_tensorlist(self.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_addcmul_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor (XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars) { - auto r_out = lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor(self.get(), tensor1.get(), tensor2.get(), scalars.get()); -return XPtrTorchTensorList(r_out); +void cpp_torch_namespace__foreach_tanh__self_TensorList (XPtrTorchTensorList self) { + lantern__foreach_tanh__tensorlist(self.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_norm_self_TensorList (XPtrTorchTensorList self, XPtrTorchScalar ord) { - auto r_out = lantern__foreach_norm_tensorlist_scalar(self.get(), ord.get()); +XPtrTorchTensorList cpp_torch_namespace__foreach_trunc_self_TensorList (XPtrTorchTensorList self) { + auto r_out = lantern__foreach_trunc_tensorlist(self.get()); return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weights_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchTensorList weights) { - auto r_out = lantern__foreach_lerp_tensorlist_tensorlist_tensorlist(self.get(), tensors1.get(), weights.get()); -return XPtrTorchTensorList(r_out); +void cpp_torch_namespace__foreach_trunc__self_TensorList (XPtrTorchTensorList self) { + lantern__foreach_trunc__tensorlist(self.get()); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weights_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchTensorList weights) { - lantern__foreach_lerp__tensorlist_tensorlist_tensorlist(self.get(), tensors1.get(), weights.get()); +void cpp_torch_namespace__foreach_zero__self_TensorList (XPtrTorchTensorList self) { + lantern__foreach_zero__tensorlist(self.get()); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensorList cpp_torch_namespace__foreach_lerp_self_TensorList_tensors1_TensorList_weight_Scalar (XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchScalar weight) { - auto r_out = lantern__foreach_lerp_tensorlist_tensorlist_scalar(self.get(), tensors1.get(), weight.get()); -return XPtrTorchTensorList(r_out); +void cpp_torch_namespace__foreach_copy__self_TensorList_src_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList src, XPtrTorchbool non_blocking) { + lantern__foreach_copy__tensorlist_tensorlist_bool(self.get(), src.get(), non_blocking.get()); } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_lerp__self_TensorList_tensors1_TensorList_weight_Scalar (XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchScalar weight) { - lantern__foreach_lerp__tensorlist_tensorlist_scalar(self.get(), tensors1.get(), weight.get()); +XPtrTorchTensorList 
cpp_torch_namespace__foreach_copy_self_TensorList_src_TensorList (XPtrTorchTensorList self, XPtrTorchTensorList src, XPtrTorchbool non_blocking) { + auto r_out = lantern__foreach_copy_tensorlist_tensorlist_bool(self.get(), src.get(), non_blocking.get()); +return XPtrTorchTensorList(r_out); } // [[Rcpp::export(rng=false)]] @@ -14067,6 +14707,12 @@ XPtrTorchTensor cpp_torch_namespace_searchsorted_sorted_sequence_Tensor_self_Sca return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar (XPtrTorchTensor out, XPtrTorchTensor sorted_sequence, XPtrTorchScalar self, XPtrTorchbool out_int32, XPtrTorchbool right, XPtrTorchoptional_string_view side, XPtrTorchOptionalTensor sorter) { + auto r_out = lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor(out.get(), sorted_sequence.get(), self.get(), out_int32.get(), right.get(), side.get(), sorter.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace__convert_indices_from_coo_to_csr_self_Tensor_size_int64_t (XPtrTorchTensor self, XPtrTorchint64_t size, XPtrTorchbool out_int32) { auto r_out = lantern__convert_indices_from_coo_to_csr_tensor_intt_bool(self.get(), size.get(), out_int32.get()); @@ -16645,6 +17291,12 @@ auto wrap = XPtrTorchvector_void(r_out); return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1))); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__linalg_eigvals_self_Tensor (XPtrTorchTensor self) { + auto r_out = lantern__linalg_eigvals_tensor(self.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace_linalg_eigvals_self_Tensor (XPtrTorchTensor self) { auto r_out = lantern_linalg_eigvals_tensor(self.get()); @@ -16989,6 +17641,12 @@ XPtrTorchTensor cpp_torch_namespace_linalg_solve_A_Tensor_B_Tensor (XPtrTorchTen return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__spsolve_A_Tensor_B_Tensor (XPtrTorchTensor A, XPtrTorchTensor B, XPtrTorchbool left) { + auto r_out = lantern__spsolve_tensor_tensor_bool(A.get(), B.get(), left.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace_linalg_solve_out_out_Tensor_A_Tensor_B_Tensor (XPtrTorchTensor out, XPtrTorchTensor A, XPtrTorchTensor B, XPtrTorchbool left) { auto r_out = lantern_linalg_solve_out_tensor_tensor_tensor_bool(out.get(), A.get(), B.get(), left.get()); @@ -17117,6 +17775,12 @@ XPtrTorchTensor cpp_torch_namespace__test_serialization_subcmul_self_Tensor_othe return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__test_parallel_materialize_self_Tensor_num_parallel_int64_t (XPtrTorchTensor self, XPtrTorchint64_t num_parallel, XPtrTorchbool skip_first) { + auto r_out = lantern__test_parallel_materialize_tensor_intt_bool(self.get(), num_parallel.get(), skip_first.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace__test_optional_intlist_values_Tensor_addends_IntArrayRef (XPtrTorchTensor values, XPtrTorchOptionalIntArrayRef addends) { auto r_out = lantern__test_optional_intlist_tensor_intarrayref(values.get(), addends.get()); @@ -17178,8 +17842,8 @@ return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor 
cpp_torch_namespace_pad_sequence_sequences_TensorList (XPtrTorchTensorList sequences, XPtrTorchbool batch_first, XPtrTorchdouble padding_value) { - auto r_out = lantern_pad_sequence_tensorlist_bool_double(sequences.get(), batch_first.get(), padding_value.get()); +XPtrTorchTensor cpp_torch_namespace_pad_sequence_sequences_TensorList (XPtrTorchTensorList sequences, XPtrTorchbool batch_first, XPtrTorchdouble padding_value, XPtrTorchstring_view padding_side) { + auto r_out = lantern_pad_sequence_tensorlist_bool_double_cstringview(sequences.get(), batch_first.get(), padding_value.get(), padding_side.get()); return XPtrTorchTensor(r_out); } @@ -17432,12 +18096,30 @@ XPtrTorchTensor cpp_torch_namespace_alias_copy_self_Tensor (XPtrTorchTensor self return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__jagged_to_padded_dense_forward_values_Tensor_offsets_TensorList_max_lengths_IntArrayRef (XPtrTorchTensor values, XPtrTorchTensorList offsets, XPtrTorchIntArrayRef max_lengths, XPtrTorchdouble padding_value) { + auto r_out = lantern__jagged_to_padded_dense_forward_tensor_tensorlist_intarrayref_double(values.get(), offsets.get(), max_lengths.get(), padding_value.get()); +return XPtrTorchTensor(r_out); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__padded_dense_to_jagged_forward_dense_Tensor_offsets_TensorList (XPtrTorchTensor dense, XPtrTorchTensorList offsets, XPtrTorchoptional_int64_t total_L) { + auto r_out = lantern__padded_dense_to_jagged_forward_tensor_tensorlist_intt(dense.get(), offsets.get(), total_L.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace__nested_tensor_softmax_with_shape_self_Tensor_query_Tensor (XPtrTorchTensor self, XPtrTorchTensor query) { auto r_out = lantern__nested_tensor_softmax_with_shape_tensor_tensor(self.get(), query.get()); return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__safe_softmax_self_Tensor_dim_int64_t (XPtrTorchTensor self, XPtrTorchindex_int64_t dim, XPtrTorchoptional_scalar_type dtype) { + auto r_out = lantern__safe_softmax_tensor_intt_scalartype(self.get(), dim.get(), dtype.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace__transformer_encoder_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor (XPtrTorchTensor src, XPtrTorchint64_t embed_dim, XPtrTorchint64_t num_heads, XPtrTorchTensor qkv_weight, XPtrTorchTensor qkv_bias, XPtrTorchTensor proj_weight, XPtrTorchTensor proj_bias, XPtrTorchbool use_gelu, XPtrTorchbool norm_first, XPtrTorchdouble eps, XPtrTorchTensor norm_weight_1, XPtrTorchTensor norm_bias_1, XPtrTorchTensor norm_weight_2, XPtrTorchTensor norm_bias_2, XPtrTorchTensor ffn_weight_1, XPtrTorchTensor ffn_bias_1, XPtrTorchTensor ffn_weight_2, XPtrTorchTensor ffn_bias_2, XPtrTorchOptionalTensor mask, XPtrTorchoptional_int64_t mask_type) { auto r_out = lantern__transformer_encoder_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt(src.get(), embed_dim.get(), num_heads.get(), qkv_weight.get(), qkv_bias.get(), proj_weight.get(), proj_bias.get(), 
use_gelu.get(), norm_first.get(), eps.get(), norm_weight_1.get(), norm_bias_1.get(), norm_weight_2.get(), norm_bias_2.get(), ffn_weight_1.get(), ffn_bias_1.get(), ffn_weight_2.get(), ffn_bias_2.get(), mask.get(), mask_type.get()); @@ -17452,99 +18134,141 @@ return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPt } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal) { - auto r_out = lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool(query.get(), key.get(), value.get(), attn_mask.get(), dropout_p.get(), is_causal.get()); +XPtrTorchTensor cpp_torch_namespace_scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionaldouble scale, XPtrTorchbool enable_gqa) { + auto r_out = lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_double_bool(query.get(), key.get(), value.get(), attn_mask.get(), dropout_p.get(), is_causal.get(), scale.get(), enable_gqa.get()); return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -Rcpp::List cpp_torch_namespace__scaled_dot_product_attention_query_Tensor_key_Tensor_value_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool need_attn_weights, XPtrTorchbool is_causal) { - auto r_out = lantern__scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_bool(query.get(), key.get(), value.get(), attn_mask.get(), dropout_p.get(), need_attn_weights.get(), is_causal.get()); +XPtrTorchint64_t cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionaldouble scale, XPtrTorchbool enable_gqa) { + auto r_out = lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool_double_bool(query.get(), key.get(), value.get(), attn_mask.get(), dropout_p.get(), is_causal.get(), scale.get(), enable_gqa.get()); +return XPtrTorchint64_t(r_out); +} + +// [[Rcpp::export(rng=false)]] +Rcpp::List cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionalTensor dropout_mask, XPtrTorchOptionaldouble scale, XPtrTorchbool enable_gqa) { + auto r_out = lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor_double_bool(query.get(), key.get(), value.get(), attn_mask.get(), dropout_p.get(), is_causal.get(), dropout_mask.get(), scale.get(), enable_gqa.get()); auto wrap = XPtrTorchvector_void(r_out); return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1))); } // [[Rcpp::export(rng=false)]] -XPtrTorchint64_t cpp_torch_namespace__fused_sdp_choice_query_Tensor_key_Tensor_value_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, 
XPtrTorchbool is_causal) { - auto r_out = lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool(query.get(), key.get(), value.get(), attn_mask.get(), dropout_p.get(), is_causal.get()); -return XPtrTorchint64_t(r_out); +Rcpp::List cpp_torch_namespace__scaled_dot_product_attention_math_for_mps_query_Tensor_key_Tensor_value_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionalTensor dropout_mask, XPtrTorchOptionaldouble scale) { + auto r_out = lantern__scaled_dot_product_attention_math_for_mps_tensor_tensor_tensor_tensor_double_bool_tensor_double(query.get(), key.get(), value.get(), attn_mask.get(), dropout_p.get(), is_causal.get(), dropout_mask.get(), scale.get()); +auto wrap = XPtrTorchvector_void(r_out); +return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1))); } // [[Rcpp::export(rng=false)]] -Rcpp::List cpp_torch_namespace__scaled_dot_product_attention_math_query_Tensor_key_Tensor_value_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_mask, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionalTensor dropout_mask) { - auto r_out = lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor(query.get(), key.get(), value.get(), attn_mask.get(), dropout_p.get(), is_causal.get(), dropout_mask.get()); +Rcpp::List cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchbool return_debug_mask, XPtrTorchOptionaldouble scale) { + auto r_out = lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool_double(query.get(), key.get(), value.get(), dropout_p.get(), is_causal.get(), return_debug_mask.get(), scale.get()); +auto wrap = XPtrTorchvector_void(r_out); +return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3)),XPtrTorchint64_t(lantern_vector_get(wrap.get(), 4)),XPtrTorchint64_t(lantern_vector_get(wrap.get(), 5)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 6)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 7)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 8))); +} + +// [[Rcpp::export(rng=false)]] +Rcpp::List cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_query_Tensor_key_Tensor_value_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionalTensor attn_mask, XPtrTorchOptionaldouble scale) { + auto r_out = lantern__scaled_dot_product_flash_attention_for_cpu_tensor_tensor_tensor_double_bool_tensor_double(query.get(), key.get(), value.get(), dropout_p.get(), is_causal.get(), attn_mask.get(), scale.get()); auto wrap = XPtrTorchvector_void(r_out); return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1))); } // [[Rcpp::export(rng=false)]] -Rcpp::List cpp_torch_namespace__scaled_dot_product_flash_attention_query_Tensor_key_Tensor_value_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, 
XPtrTorchbool return_debug_mask) { - auto r_out = lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool(query.get(), key.get(), value.get(), dropout_p.get(), is_causal.get(), return_debug_mask.get()); +Rcpp::List cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_query_Tensor_key_Tensor_value_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_bias, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchbool return_debug_mask, XPtrTorchOptionaldouble scale) { + auto r_out = lantern__scaled_dot_product_fused_attention_overrideable_tensor_tensor_tensor_tensor_double_bool_bool_double(query.get(), key.get(), value.get(), attn_bias.get(), dropout_p.get(), is_causal.get(), return_debug_mask.get(), scale.get()); auto wrap = XPtrTorchvector_void(r_out); -return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3)),XPtrTorchint64_t(lantern_vector_get(wrap.get(), 4)),XPtrTorchint64_t(lantern_vector_get(wrap.get(), 5)),XPtrTorchint64_t(lantern_vector_get(wrap.get(), 6)),XPtrTorchint64_t(lantern_vector_get(wrap.get(), 7)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 8))); +return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3)),XPtrTorchint64_t(lantern_vector_get(wrap.get(), 4)),XPtrTorchint64_t(lantern_vector_get(wrap.get(), 5)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 6)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 7)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 8))); } // [[Rcpp::export(rng=false)]] -Rcpp::List cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t (XPtrTorchTensor grad_out, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchTensor cum_seq_q, XPtrTorchTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchint64_t philox_seed, XPtrTorchint64_t philox_offset) { - auto r_out = lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt(grad_out.get(), query.get(), key.get(), value.get(), out.get(), logsumexp.get(), cum_seq_q.get(), cum_seq_k.get(), max_q.get(), max_k.get(), dropout_p.get(), is_causal.get(), philox_seed.get(), philox_offset.get()); +Rcpp::List cpp_torch_namespace__scaled_dot_product_flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor (XPtrTorchTensor grad_out, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchTensor cum_seq_q, XPtrTorchTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchTensor philox_seed, XPtrTorchTensor philox_offset, 
XPtrTorchOptionaldouble scale) { + auto r_out = lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double(grad_out.get(), query.get(), key.get(), value.get(), out.get(), logsumexp.get(), cum_seq_q.get(), cum_seq_k.get(), max_q.get(), max_k.get(), dropout_p.get(), is_causal.get(), philox_seed.get(), philox_offset.get(), scale.get()); auto wrap = XPtrTorchvector_void(r_out); return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2))); } // [[Rcpp::export(rng=false)]] -Rcpp::List cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_compute_log_sumexp_bool (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchbool compute_log_sumexp, XPtrTorchbool is_causal) { - auto r_out = lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_bool_bool(query.get(), key.get(), value.get(), compute_log_sumexp.get(), is_causal.get()); +Rcpp::List cpp_torch_namespace__scaled_dot_product_flash_attention_for_cpu_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_dropout_p_double_is_causal_bool (XPtrTorchTensor grad_out, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionalTensor attn_mask, XPtrTorchOptionaldouble scale) { + auto r_out = lantern__scaled_dot_product_flash_attention_for_cpu_backward_tensor_tensor_tensor_tensor_tensor_tensor_double_bool_tensor_double(grad_out.get(), query.get(), key.get(), value.get(), out.get(), logsumexp.get(), dropout_p.get(), is_causal.get(), attn_mask.get(), scale.get()); auto wrap = XPtrTorchvector_void(r_out); -return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1))); +return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2))); } // [[Rcpp::export(rng=false)]] -Rcpp::List cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor (XPtrTorchTensor grad_out_, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchbool is_causal, XPtrTorchbool chunk_grad_outputs) { - auto r_out = lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(grad_out_.get(), query.get(), key.get(), value.get(), out.get(), logsumexp.get(), is_causal.get(), chunk_grad_outputs.get()); +Rcpp::List cpp_torch_namespace__scaled_dot_product_fused_attention_overrideable_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_grad_input_mask_stdarraybool4_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor (XPtrTorchTensor grad_out, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor attn_bias, std::vector<bool> grad_input_mask, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchTensor cum_seq_q, XPtrTorchTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p,
XPtrTorchbool is_causal, XPtrTorchTensor philox_seed, XPtrTorchTensor philox_offset, XPtrTorchOptionaldouble scale) { + auto r_out = lantern__scaled_dot_product_fused_attention_overrideable_backward_tensor_tensor_tensor_tensor_tensor_stdarraybool_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double(grad_out.get(), query.get(), key.get(), value.get(), attn_bias.get(), reinterpret_cast<void*>(&grad_input_mask), out.get(), logsumexp.get(), cum_seq_q.get(), cum_seq_k.get(), max_q.get(), max_k.get(), dropout_p.get(), is_causal.get(), philox_seed.get(), philox_offset.get(), scale.get()); auto wrap = XPtrTorchvector_void(r_out); -return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2))); +return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3))); } // [[Rcpp::export(rng=false)]] -XPtrTorchbool cpp_torch_namespace__chunk_grad_outputs_efficient_attention_query_Tensor_key_Tensor_value_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchbool is_causal) { - auto r_out = lantern__chunk_grad_outputs_efficient_attention_tensor_tensor_tensor_bool(query.get(), key.get(), value.get(), is_causal.get()); -return XPtrTorchbool(r_out); +Rcpp::List cpp_torch_namespace__scaled_dot_product_efficient_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_bias, XPtrTorchbool compute_log_sumexp, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionaldouble scale) { + auto r_out = lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_tensor_bool_double_bool_double(query.get(), key.get(), value.get(), attn_bias.get(), compute_log_sumexp.get(), dropout_p.get(), is_causal.get(), scale.get()); +auto wrap = XPtrTorchvector_void(r_out); +return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3))); +} + +// [[Rcpp::export(rng=false)]] +Rcpp::List cpp_torch_namespace__scaled_dot_product_efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_dropout_p_double_grad_input_mask_stdarraybool4 (XPtrTorchTensor grad_out_, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor attn_bias, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchTensor philox_seed, XPtrTorchTensor philox_offset, XPtrTorchdouble dropout_p, std::vector<bool> grad_input_mask, XPtrTorchbool is_causal, XPtrTorchOptionaldouble scale) { + auto r_out = lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_stdarraybool_bool_double(grad_out_.get(), query.get(), key.get(), value.get(), attn_bias.get(), out.get(), logsumexp.get(), philox_seed.get(), philox_offset.get(), dropout_p.get(), reinterpret_cast<void*>(&grad_input_mask), is_causal.get(), scale.get()); +auto wrap = XPtrTorchvector_void(r_out); +return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(),
0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3))); } // [[Rcpp::export(rng=false)]] -Rcpp::List cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor cum_seq_q, XPtrTorchTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchbool return_debug_mask) { - auto r_out = lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool(query.get(), key.get(), value.get(), cum_seq_q.get(), cum_seq_k.get(), max_q.get(), max_k.get(), dropout_p.get(), is_causal.get(), return_debug_mask.get()); +Rcpp::List cpp_torch_namespace__scaled_dot_product_cudnn_attention_query_Tensor_key_Tensor_value_Tensor_attn_bias_Tensor_compute_log_sumexp_bool (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor attn_bias, XPtrTorchbool compute_log_sumexp, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchbool return_debug_mask, XPtrTorchOptionaldouble scale) { + auto r_out = lantern__scaled_dot_product_cudnn_attention_tensor_tensor_tensor_tensor_bool_double_bool_bool_double(query.get(), key.get(), value.get(), attn_bias.get(), compute_log_sumexp.get(), dropout_p.get(), is_causal.get(), return_debug_mask.get(), scale.get()); auto wrap = XPtrTorchvector_void(r_out); -return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchint64_t(lantern_vector_get(wrap.get(), 2)),XPtrTorchint64_t(lantern_vector_get(wrap.get(), 3)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 4))); +return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3)),XPtrTorchint64_t(lantern_vector_get(wrap.get(), 4)),XPtrTorchint64_t(lantern_vector_get(wrap.get(), 5)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 6)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 7)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 8))); } // [[Rcpp::export(rng=false)]] -Rcpp::List cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_int64_t_philox_offset_int64_t (XPtrTorchTensor grad_out, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchTensor cum_seq_q, XPtrTorchTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchint64_t philox_seed, XPtrTorchint64_t philox_offset) { - auto r_out = lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt(grad_out.get(), query.get(), key.get(), value.get(), out.get(), logsumexp.get(), cum_seq_q.get(), cum_seq_k.get(), max_q.get(), max_k.get(), dropout_p.get(), is_causal.get(), philox_seed.get(), philox_offset.get()); +Rcpp::List 
cpp_torch_namespace__scaled_dot_product_cudnn_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_philox_seed_Tensor_philox_offset_Tensor_attn_bias_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool (XPtrTorchTensor grad_out, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchTensor philox_seed, XPtrTorchTensor philox_offset, XPtrTorchTensor attn_bias, XPtrTorchTensor cum_seq_q, XPtrTorchTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchOptionaldouble scale) { + auto r_out = lantern__scaled_dot_product_cudnn_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_double(grad_out.get(), query.get(), key.get(), value.get(), out.get(), logsumexp.get(), philox_seed.get(), philox_offset.get(), attn_bias.get(), cum_seq_q.get(), cum_seq_k.get(), max_q.get(), max_k.get(), dropout_p.get(), is_causal.get(), scale.get()); auto wrap = XPtrTorchvector_void(r_out); return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2))); } // [[Rcpp::export(rng=false)]] -Rcpp::List cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor cu_seqlens_q, XPtrTorchOptionalTensor cu_seqlens_k, XPtrTorchoptional_int64_t max_seqlen_q, XPtrTorchbool compute_log_sumexp, XPtrTorchbool causal) { - auto r_out = lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_bool_bool(query.get(), key.get(), value.get(), cu_seqlens_q.get(), cu_seqlens_k.get(), max_seqlen_q.get(), compute_log_sumexp.get(), causal.get()); +Rcpp::List cpp_torch_namespace__flash_attention_forward_query_Tensor_key_Tensor_value_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_return_debug_mask_bool (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor cum_seq_q, XPtrTorchOptionalTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchbool return_debug_mask, XPtrTorchOptionaldouble scale, XPtrTorchoptional_int64_t window_size_left, XPtrTorchoptional_int64_t window_size_right, XPtrTorchOptionalTensor seqused_k, XPtrTorchOptionalTensor alibi_slopes) { + auto r_out = lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool_double_intt_intt_tensor_tensor(query.get(), key.get(), value.get(), cum_seq_q.get(), cum_seq_k.get(), max_q.get(), max_k.get(), dropout_p.get(), is_causal.get(), return_debug_mask.get(), scale.get(), window_size_left.get(), window_size_right.get(), seqused_k.get(), alibi_slopes.get()); auto wrap = XPtrTorchvector_void(r_out); -return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1))); +return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 
4))); } // [[Rcpp::export(rng=false)]] -Rcpp::List cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor (XPtrTorchTensor grad_out_, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchbool is_causal, XPtrTorchbool chunk_grad_outputs) { - auto r_out = lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(grad_out_.get(), query.get(), key.get(), value.get(), out.get(), logsumexp.get(), is_causal.get(), chunk_grad_outputs.get()); +Rcpp::List cpp_torch_namespace__flash_attention_backward_grad_out_Tensor_query_Tensor_key_Tensor_value_Tensor_out_Tensor_logsumexp_Tensor_cum_seq_q_Tensor_cum_seq_k_Tensor_max_q_int64_t_max_k_int64_t_dropout_p_double_is_causal_bool_philox_seed_Tensor_philox_offset_Tensor (XPtrTorchTensor grad_out, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchTensor out, XPtrTorchTensor logsumexp, XPtrTorchTensor cum_seq_q, XPtrTorchTensor cum_seq_k, XPtrTorchint64_t max_q, XPtrTorchint64_t max_k, XPtrTorchdouble dropout_p, XPtrTorchbool is_causal, XPtrTorchTensor philox_seed, XPtrTorchTensor philox_offset, XPtrTorchOptionaldouble scale, XPtrTorchoptional_int64_t window_size_left, XPtrTorchoptional_int64_t window_size_right) { + auto r_out = lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double_intt_intt(grad_out.get(), query.get(), key.get(), value.get(), out.get(), logsumexp.get(), cum_seq_q.get(), cum_seq_k.get(), max_q.get(), max_k.get(), dropout_p.get(), is_causal.get(), philox_seed.get(), philox_offset.get(), scale.get(), window_size_left.get(), window_size_right.get()); auto wrap = XPtrTorchvector_void(r_out); return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2))); } +// [[Rcpp::export(rng=false)]] +Rcpp::List cpp_torch_namespace__efficient_attention_forward_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_dropout_p_double_custom_mask_type_int64_t (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor bias, XPtrTorchOptionalTensor cu_seqlens_q, XPtrTorchOptionalTensor cu_seqlens_k, XPtrTorchoptional_int64_t max_seqlen_q, XPtrTorchoptional_int64_t max_seqlen_k, XPtrTorchdouble dropout_p, XPtrTorchint64_t custom_mask_type, XPtrTorchbool compute_log_sumexp, XPtrTorchOptionaldouble scale, XPtrTorchOptionalTensor seqlen_k, XPtrTorchoptional_int64_t window_size) { + auto r_out = lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_intt_bool_double_tensor_intt(query.get(), key.get(), value.get(), bias.get(), cu_seqlens_q.get(), cu_seqlens_k.get(), max_seqlen_q.get(), max_seqlen_k.get(), dropout_p.get(), custom_mask_type.get(), compute_log_sumexp.get(), scale.get(), seqlen_k.get(), window_size.get()); +auto wrap = XPtrTorchvector_void(r_out); +return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3)),XPtrTorchint64_t(lantern_vector_get(wrap.get(), 4)),XPtrTorchint64_t(lantern_vector_get(wrap.get(), 5))); +} + +// [[Rcpp::export(rng=false)]] 
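// ---------------------------------------------------------------------------
// Editorial note on the generated pattern (a sketch, not part of the diff):
// every binding in this file wraps a C-level `lantern_*` call that returns an
// opaque `void*`. Single-output ops wrap that pointer directly (e.g. in
// `XPtrTorchTensor`); multi-output ops receive a type-erased vector that is
// unpacked positionally with `lantern_vector_get`. A minimal sketch, assuming
// a hypothetical two-output call `lantern_example_op_tensor`:
//
//   auto r_out = lantern_example_op_tensor(input.get());      // opaque void*
//   auto wrap = XPtrTorchvector_void(r_out);                  // owns the vector
//   return Rcpp::List::create(
//       XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),   // first output
//       XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)));  // second output
//
// The wrapper type chosen for each element (XPtrTorchTensor vs.
// XPtrTorchint64_t) has to follow the ATen schema, which is why the attention
// bindings above now return Tensor-typed philox_seed/philox_offset entries
// where the removed versions returned int64_t values.
// ---------------------------------------------------------------------------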
+Rcpp::List cpp_torch_namespace__efficient_attention_backward_grad_out__Tensor_query_Tensor_key_Tensor_value_Tensor_bias_Tensor_out_Tensor_cu_seqlens_q_Tensor_cu_seqlens_k_Tensor_max_seqlen_q_int64_t_max_seqlen_k_int64_t_logsumexp_Tensor_dropout_p_double_philox_seed_Tensor_philox_offset_Tensor_custom_mask_type_int64_t_bias_requires_grad_bool (XPtrTorchTensor grad_out_, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchOptionalTensor bias, XPtrTorchTensor out, XPtrTorchOptionalTensor cu_seqlens_q, XPtrTorchOptionalTensor cu_seqlens_k, XPtrTorchint64_t max_seqlen_q, XPtrTorchint64_t max_seqlen_k, XPtrTorchTensor logsumexp, XPtrTorchdouble dropout_p, XPtrTorchTensor philox_seed, XPtrTorchTensor philox_offset, XPtrTorchint64_t custom_mask_type, XPtrTorchbool bias_requires_grad, XPtrTorchOptionaldouble scale, XPtrTorchoptional_int64_t num_splits_key, XPtrTorchoptional_int64_t window_size, XPtrTorchbool shared_storage_dqdkdv) { + auto r_out = lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_double_tensor_tensor_intt_bool_double_intt_intt_bool(grad_out_.get(), query.get(), key.get(), value.get(), bias.get(), out.get(), cu_seqlens_q.get(), cu_seqlens_k.get(), max_seqlen_q.get(), max_seqlen_k.get(), logsumexp.get(), dropout_p.get(), philox_seed.get(), philox_offset.get(), custom_mask_type.get(), bias_requires_grad.get(), scale.get(), num_splits_key.get(), window_size.get(), shared_storage_dqdkdv.get()); +auto wrap = XPtrTorchvector_void(r_out); +return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3))); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace__triton_scaled_dot_attention_q_Tensor_k_Tensor_v_Tensor (XPtrTorchTensor q, XPtrTorchTensor k, XPtrTorchTensor v, XPtrTorchdouble dropout_p) { auto r_out = lantern__triton_scaled_dot_attention_tensor_tensor_tensor_double(q.get(), k.get(), v.get(), dropout_p.get()); return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__fill_mem_eff_dropout_mask__self_Tensor_dropout_p_double_seed_int64_t_offset_int64_t (XPtrTorchTensor self, XPtrTorchdouble dropout_p, XPtrTorchint64_t seed, XPtrTorchint64_t offset) { + auto r_out = lantern__fill_mem_eff_dropout_mask__tensor_double_intt_intt(self.get(), dropout_p.get(), seed.get(), offset.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace__triton_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchint64_t embed_dim, XPtrTorchint64_t num_head, XPtrTorchTensor qkv_weight, XPtrTorchTensor qkv_bias, XPtrTorchTensor proj_weight, XPtrTorchTensor proj_bias, XPtrTorchOptionalTensor mask) { auto r_out = lantern__triton_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor(query.get(), key.get(), value.get(), embed_dim.get(), num_head.get(), qkv_weight.get(), qkv_bias.get(), proj_weight.get(), proj_bias.get(), mask.get()); @@ -17563,20 +18287,6 @@ XPtrTorchTensor cpp_torch_namespace_special_airy_ai_out_out_Tensor_x_Tensor (XPt return XPtrTorchTensor(r_out); } -// [[Rcpp::export(rng=false)]] -Rcpp::List 
cpp_torch_namespace__transformer_decoder_only_layer_fwd_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor (XPtrTorchTensor src, XPtrTorchint64_t embed_dim, XPtrTorchint64_t num_heads, XPtrTorchTensor qkv_weight, XPtrTorchTensor qkv_bias, XPtrTorchTensor proj_weight, XPtrTorchTensor proj_bias, XPtrTorchbool use_gelu, XPtrTorchbool norm_first, XPtrTorchdouble eps, XPtrTorchTensor norm_weight_1, XPtrTorchTensor norm_bias_1, XPtrTorchTensor norm_weight_2, XPtrTorchTensor norm_bias_2, XPtrTorchTensor ffn_weight_1, XPtrTorchTensor ffn_bias_1, XPtrTorchTensor ffn_weight_2, XPtrTorchTensor ffn_bias_2, XPtrTorchOptionalTensor mask, XPtrTorchOptionalTensor incr_key, XPtrTorchOptionalTensor incr_value) { - auto r_out = lantern__transformer_decoder_only_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(src.get(), embed_dim.get(), num_heads.get(), qkv_weight.get(), qkv_bias.get(), proj_weight.get(), proj_bias.get(), use_gelu.get(), norm_first.get(), eps.get(), norm_weight_1.get(), norm_bias_1.get(), norm_weight_2.get(), norm_bias_2.get(), ffn_weight_1.get(), ffn_bias_1.get(), ffn_weight_2.get(), ffn_bias_2.get(), mask.get(), incr_key.get(), incr_value.get()); -auto wrap = XPtrTorchvector_void(r_out); -return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2))); -} - -// [[Rcpp::export(rng=false)]] -Rcpp::List cpp_torch_namespace__native_decoder_only_multi_head_attention_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor (XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchint64_t embed_dim, XPtrTorchint64_t num_head, XPtrTorchTensor qkv_weight, XPtrTorchTensor qkv_bias, XPtrTorchTensor proj_weight, XPtrTorchTensor proj_bias, XPtrTorchOptionalTensor mask, XPtrTorchOptionalTensor incr_key, XPtrTorchOptionalTensor incr_value, XPtrTorchbool need_weights, XPtrTorchbool average_attn_weights) { - auto r_out = lantern__native_decoder_only_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(query.get(), key.get(), value.get(), embed_dim.get(), num_head.get(), qkv_weight.get(), qkv_bias.get(), proj_weight.get(), proj_bias.get(), mask.get(), incr_key.get(), incr_value.get(), need_weights.get(), average_attn_weights.get()); -auto wrap = XPtrTorchvector_void(r_out); -return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3))); -} - // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace_special_bessel_j0_self_Tensor (XPtrTorchTensor self) { auto r_out = lantern_special_bessel_j0_tensor(self.get()); @@ -18152,11 +18862,41 @@ void cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_ lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(self.get(), grads.get(), exp_avgs.get(), 
exp_avg_sqs.get(), max_exp_avg_sqs.get(), state_steps.get(), lr.get(), beta1.get(), beta2.get(), weight_decay.get(), eps.get(), amsgrad.get(), maximize.get(), grad_scale.get(), found_inf.get()); } +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__fused_adam__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool (XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchTensor lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) { + lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(self.get(), grads.get(), exp_avgs.get(), exp_avg_sqs.get(), max_exp_avg_sqs.get(), state_steps.get(), lr.get(), beta1.get(), beta2.get(), weight_decay.get(), eps.get(), amsgrad.get(), maximize.get(), grad_scale.get(), found_inf.get()); +} + // [[Rcpp::export(rng=false)]] void cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool (XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchdouble lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) { lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(self.get(), grads.get(), exp_avgs.get(), exp_avg_sqs.get(), max_exp_avg_sqs.get(), state_steps.get(), lr.get(), beta1.get(), beta2.get(), weight_decay.get(), eps.get(), amsgrad.get(), maximize.get(), grad_scale.get(), found_inf.get()); } +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__fused_adamw__self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool (XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchTensor lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) { + lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(self.get(), grads.get(), exp_avgs.get(), exp_avg_sqs.get(), max_exp_avg_sqs.get(), state_steps.get(), lr.get(), beta1.get(), beta2.get(), weight_decay.get(), eps.get(), amsgrad.get(), maximize.get(), grad_scale.get(), found_inf.get()); +} + +// [[Rcpp::export(rng=false)]] +void 
cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool (XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList momentum_buffer_list, XPtrTorchdouble weight_decay, XPtrTorchdouble momentum, XPtrTorchdouble lr, XPtrTorchdouble dampening, XPtrTorchbool nesterov, XPtrTorchbool maximize, XPtrTorchbool is_first_step, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) { + lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(self.get(), grads.get(), momentum_buffer_list.get(), weight_decay.get(), momentum.get(), lr.get(), dampening.get(), nesterov.get(), maximize.get(), is_first_step.get(), grad_scale.get(), found_inf.get()); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__fused_sgd__self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool (XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList momentum_buffer_list, XPtrTorchdouble weight_decay, XPtrTorchdouble momentum, XPtrTorchTensor lr, XPtrTorchdouble dampening, XPtrTorchbool nesterov, XPtrTorchbool maximize, XPtrTorchbool is_first_step, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) { + lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(self.get(), grads.get(), momentum_buffer_list.get(), weight_decay.get(), momentum.get(), lr.get(), dampening.get(), nesterov.get(), maximize.get(), is_first_step.get(), grad_scale.get(), found_inf.get()); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__fused_adagrad__self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool (XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList state_sums, XPtrTorchTensorList state_steps, XPtrTorchdouble lr, XPtrTorchdouble lr_decay, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) { + lantern__fused_adagrad__tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(self.get(), grads.get(), state_sums.get(), state_steps.get(), lr.get(), lr_decay.get(), weight_decay.get(), eps.get(), maximize.get(), grad_scale.get(), found_inf.get()); +} + +// [[Rcpp::export(rng=false)]] +void cpp_torch_namespace__propagate_xla_data_input_Tensor_output_Tensor (XPtrTorchTensor input, XPtrTorchTensor output) { + lantern__propagate_xla_data_tensor_tensor(input.get(), output.get()); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace__new_zeros_with_same_feature_meta_out_out_Tensor_self_Tensor_other_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor other, XPtrTorchint64_t self_num_batch_dims) { auto r_out = lantern__new_zeros_with_same_feature_meta_out_tensor_tensor_tensor_intt(out.get(), self.get(), other.get(), self_num_batch_dims.get()); @@ -18244,6 +18984,12 @@ XPtrTorchTensor cpp_torch_namespace_affine_grid_generator_out_out_Tensor_theta_T return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__test_functorch_fallback_out_out_Tensor_self_Tensor_other_Tensor (XPtrTorchTensor out, 
XPtrTorchTensor self, XPtrTorchTensor other) { + auto r_out = lantern__test_functorch_fallback_out_tensor_tensor_tensor(out.get(), self.get(), other.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace_bartlett_window_out_out_Tensor_window_length_int64_t (XPtrTorchTensor out, XPtrTorchint64_t window_length) { auto r_out = lantern_bartlett_window_out_tensor_intt(out.get(), window_length.get()); @@ -18410,12 +19156,6 @@ auto wrap = XPtrTorchvector_void(r_out); return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2))); } -// [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace_cudnn_convolution_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor weight, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups, XPtrTorchbool benchmark, XPtrTorchbool deterministic, XPtrTorchbool allow_tf32) { - auto r_out = lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(out.get(), self.get(), weight.get(), padding.get(), stride.get(), dilation.get(), groups.get(), benchmark.get(), deterministic.get(), allow_tf32.get()); -return XPtrTorchTensor(r_out); -} - // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace_cudnn_convolution_transpose_out_out_Tensor_self_Tensor_weight_Tensor_padding_IntArrayRef_output_padding_IntArrayRef_stride_IntArrayRef_dilation_IntArrayRef_groups_int64_t_benchmark_bool_deterministic_bool_allow_tf32_bool (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor weight, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef output_padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups, XPtrTorchbool benchmark, XPtrTorchbool deterministic, XPtrTorchbool allow_tf32) { auto r_out = lantern_cudnn_convolution_transpose_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(out.get(), self.get(), weight.get(), padding.get(), output_padding.get(), stride.get(), dilation.get(), groups.get(), benchmark.get(), deterministic.get(), allow_tf32.get()); @@ -18560,6 +19300,12 @@ XPtrTorchTensor cpp_torch_namespace_empty_out_out_Tensor_size_IntArrayRef_names_ return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace_empty_permuted_out_out_Tensor_size_IntArrayRef_physical_layout_IntArrayRef (XPtrTorchTensor out, XPtrTorchIntArrayRef size, XPtrTorchIntArrayRef physical_layout) { + auto r_out = lantern_empty_permuted_out_tensor_intarrayref_intarrayref(out.get(), size.get(), physical_layout.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace_new_empty_out_out_Tensor_self_Tensor_size_IntArrayRef (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef size) { auto r_out = lantern_new_empty_out_tensor_tensor_intarrayref(out.get(), self.get(), size.get()); @@ -18656,6 +19402,12 @@ XPtrTorchTensor cpp_torch_namespace_fill_out_out_Tensor_self_Tensor_value_Tensor return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace_floor_divide_out_out_Tensor_self_Tensor_other_Scalar (XPtrTorchTensor out, 
XPtrTorchTensor self, XPtrTorchScalar other) { + auto r_out = lantern_floor_divide_out_tensor_tensor_scalar(out.get(), self.get(), other.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace_full_out_out_Tensor_size_IntArrayRef_fill_value_Scalar_names_DimnameList (XPtrTorchTensor out, XPtrTorchIntArrayRef size, XPtrTorchScalar fill_value, XPtrTorchOptionalDimnameList names) { auto r_out = lantern_full_out_tensor_intarrayref_scalar_dimnamelist(out.get(), size.get(), fill_value.get(), names.get()); @@ -18775,20 +19527,20 @@ return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPt } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate) { - auto r_out = lantern_index_put_out_tensor_tensor_constclistcoptionaltensor_tensor_bool(out.get(), self.get(), indices.get(), values.get(), accumulate.get()); +XPtrTorchTensor cpp_torch_namespace_index_put_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate) { + auto r_out = lantern_index_put_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool(out.get(), self.get(), indices.get(), values.get(), accumulate.get()); return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate, XPtrTorchbool unsafe) { - auto r_out = lantern__index_put_impl_out_tensor_tensor_constclistcoptionaltensor_tensor_bool_bool(out.get(), self.get(), indices.get(), values.get(), accumulate.get(), unsafe.get()); +XPtrTorchTensor cpp_torch_namespace__index_put_impl_out_out_Tensor_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate, XPtrTorchbool unsafe) { + auto r_out = lantern__index_put_impl_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool_bool(out.get(), self.get(), indices.get(), values.get(), accumulate.get(), unsafe.get()); return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10Listc10optionalTensor_values_Tensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate, XPtrTorchbool unsafe) { - auto r_out = lantern__index_put_impl_tensor_constclistcoptionaltensor_tensor_bool_bool(self.get(), indices.get(), values.get(), accumulate.get(), unsafe.get()); +XPtrTorchTensor cpp_torch_namespace__index_put_impl_self_Tensor_indices_constc10ListstdoptionalTensor_values_Tensor (XPtrTorchTensor self, XPtrTorchOptionalIndexTensorList indices, XPtrTorchTensor values, XPtrTorchbool accumulate, XPtrTorchbool unsafe) { + auto r_out = lantern__index_put_impl_tensor_constcliststdoptionaltensor_tensor_bool_bool(self.get(), indices.get(), values.get(), accumulate.get(), unsafe.get()); return XPtrTorchTensor(r_out); } @@ -18908,6 +19660,12 @@ XPtrTorchTensor 
cpp_torch_namespace_quantized_max_pool2d_out_out_Tensor_self_Ten return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace_quantized_max_pool3d_out_out_Tensor_self_Tensor_kernel_size_IntArrayRef (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef kernel_size, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef dilation, XPtrTorchbool ceil_mode) { + auto r_out = lantern_quantized_max_pool3d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(out.get(), self.get(), kernel_size.get(), stride.get(), padding.get(), dilation.get(), ceil_mode.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace_median_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self) { auto r_out = lantern_median_out_tensor_tensor(out.get(), self.get()); @@ -19016,6 +19774,13 @@ auto wrap = XPtrTorchvector_void(r_out); return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 4))); } +// [[Rcpp::export(rng=false)]] +Rcpp::List cpp_torch_namespace__native_batch_norm_legit_no_training_out_out0_Tensor_out1_Tensor_out2_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double (XPtrTorchTensor out0, XPtrTorchTensor out1, XPtrTorchTensor out2, XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchTensor running_mean, XPtrTorchTensor running_var, XPtrTorchdouble momentum, XPtrTorchdouble eps) { + auto r_out = lantern__native_batch_norm_legit_no_training_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(out0.get(), out1.get(), out2.get(), input.get(), weight.get(), bias.get(), running_mean.get(), running_var.get(), momentum.get(), eps.get()); +auto wrap = XPtrTorchvector_void(r_out); +return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2))); +} + // [[Rcpp::export(rng=false)]] Rcpp::List cpp_torch_namespace_batch_norm_stats_out_out0_Tensor_out1_Tensor_input_Tensor_eps_double (XPtrTorchTensor out0, XPtrTorchTensor out1, XPtrTorchTensor input, XPtrTorchdouble eps) { auto r_out = lantern_batch_norm_stats_out_tensor_tensor_tensor_double(out0.get(), out1.get(), input.get(), eps.get()); @@ -19052,8 +19817,8 @@ return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPt } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_mean_dy_Tensor_mean_dy_xmu_Tensor_count_Tensor (XPtrTorchTensor out, XPtrTorchTensor grad_out, XPtrTorchTensor input, XPtrTorchTensor mean, XPtrTorchTensor invstd, XPtrTorchOptionalTensor weight, XPtrTorchTensor mean_dy, XPtrTorchTensor mean_dy_xmu, XPtrTorchTensor count) { - auto r_out = lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(out.get(), grad_out.get(), input.get(), mean.get(), invstd.get(), weight.get(), mean_dy.get(), mean_dy_xmu.get(), count.get()); +XPtrTorchTensor 
cpp_torch_namespace_batch_norm_backward_elemt_out_out_Tensor_grad_out_Tensor_input_Tensor_mean_Tensor_invstd_Tensor_weight_Tensor_sum_dy_Tensor_sum_dy_xmu_Tensor_count_Tensor (XPtrTorchTensor out, XPtrTorchTensor grad_out, XPtrTorchTensor input, XPtrTorchTensor mean, XPtrTorchTensor invstd, XPtrTorchOptionalTensor weight, XPtrTorchTensor sum_dy, XPtrTorchTensor sum_dy_xmu, XPtrTorchTensor count) { + auto r_out = lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(out.get(), grad_out.get(), input.get(), mean.get(), invstd.get(), weight.get(), sum_dy.get(), sum_dy_xmu.get(), count.get()); return XPtrTorchTensor(r_out); } @@ -19333,6 +20098,12 @@ XPtrTorchTensor cpp_torch_namespace__nested_tensor_strides_out_out_Tensor_self_T return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__nested_tensor_storage_offsets_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self) { + auto r_out = lantern__nested_tensor_storage_offsets_out_tensor_tensor(out.get(), self.get()); +return XPtrTorchTensor(r_out); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace__nested_from_padded_and_nested_example_out_out_Tensor_padded_Tensor_nt_example_Tensor (XPtrTorchTensor out, XPtrTorchTensor padded, XPtrTorchTensor nt_example) { auto r_out = lantern__nested_from_padded_and_nested_example_out_tensor_tensor_tensor(out.get(), padded.get(), nt_example.get()); @@ -19340,8 +20111,20 @@ return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_IntArrayRef (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor nested_size, XPtrTorchTensor nested_strides, XPtrTorchIntArrayRef offsets) { - auto r_out = lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_intarrayref(out.get(), self.get(), nested_size.get(), nested_strides.get(), offsets.get()); +XPtrTorchTensor cpp_torch_namespace__nested_view_from_buffer_copy_out_out_Tensor_self_Tensor_nested_size_Tensor_nested_strides_Tensor_offsets_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor nested_size, XPtrTorchTensor nested_strides, XPtrTorchTensor offsets) { + auto r_out = lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_tensor(out.get(), self.get(), nested_size.get(), nested_strides.get(), offsets.get()); +return XPtrTorchTensor(r_out); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__nested_view_from_jagged_copy_out_out_Tensor_self_Tensor_offsets_Tensor_dummy_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor offsets, XPtrTorchTensor dummy, XPtrTorchOptionalTensor lengths, XPtrTorchint64_t ragged_idx, XPtrTorchOptionalTensor min_seqlen, XPtrTorchOptionalTensor max_seqlen) { + auto r_out = lantern__nested_view_from_jagged_copy_out_tensor_tensor_tensor_tensor_tensor_intt_tensor_tensor(out.get(), self.get(), offsets.get(), dummy.get(), lengths.get(), ragged_idx.get(), min_seqlen.get(), max_seqlen.get()); +return XPtrTorchTensor(r_out); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__nested_get_values_copy_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self) { + auto r_out = lantern__nested_get_values_copy_out_tensor_tensor(out.get(), self.get()); return XPtrTorchTensor(r_out); } @@ -19472,6 +20255,20 @@ XPtrTorchTensor 
cpp_torch_namespace_native_norm_out_out_Tensor_self_Tensor_p_Sca return XPtrTorchTensor(r_out); } +// [[Rcpp::export(rng=false)]] +Rcpp::List cpp_torch_namespace__batch_norm_with_update_functional_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double (XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchTensor running_mean, XPtrTorchTensor running_var, XPtrTorchdouble momentum, XPtrTorchdouble eps) { + auto r_out = lantern__batch_norm_with_update_functional_tensor_tensor_tensor_tensor_tensor_double_double(input.get(), weight.get(), bias.get(), running_mean.get(), running_var.get(), momentum.get(), eps.get()); +auto wrap = XPtrTorchvector_void(r_out); +return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 4)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 5))); +} + +// [[Rcpp::export(rng=false)]] +Rcpp::List cpp_torch_namespace__batch_norm_no_update_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_input_Tensor_weight_Tensor_bias_Tensor_running_mean_Tensor_running_var_Tensor_momentum_double_eps_double (XPtrTorchTensor out0, XPtrTorchTensor out1, XPtrTorchTensor out2, XPtrTorchTensor out3, XPtrTorchTensor input, XPtrTorchOptionalTensor weight, XPtrTorchOptionalTensor bias, XPtrTorchOptionalTensor running_mean, XPtrTorchOptionalTensor running_var, XPtrTorchdouble momentum, XPtrTorchdouble eps) { + auto r_out = lantern__batch_norm_no_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(out0.get(), out1.get(), out2.get(), out3.get(), input.get(), weight.get(), bias.get(), running_mean.get(), running_var.get(), momentum.get(), eps.get()); +auto wrap = XPtrTorchvector_void(r_out); +return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3))); +} + // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace__sparse_sum_out_out_Tensor_self_Tensor_dim_IntArrayRef (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIndexIntArrayRef dim) { auto r_out = lantern__sparse_sum_out_tensor_tensor_intarrayref(out.get(), self.get(), dim.get()); @@ -19617,8 +20414,8 @@ return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor (XPtrTorchTensor out, XPtrTorchint64_t sparse_dim, XPtrTorchint64_t dense_dim, XPtrTorchIntArrayRef size, XPtrTorchIndexTensor indices, XPtrTorchTensor values) { - auto r_out = lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor(out.get(), sparse_dim.get(), dense_dim.get(), size.get(), indices.get(), values.get()); +XPtrTorchTensor cpp_torch_namespace__sparse_coo_tensor_with_dims_and_tensors_out_out_Tensor_sparse_dim_int64_t_dense_dim_int64_t_size_IntArrayRef_indices_Tensor_values_Tensor (XPtrTorchTensor out, XPtrTorchint64_t sparse_dim, XPtrTorchint64_t dense_dim, XPtrTorchIntArrayRef size, XPtrTorchIndexTensor indices, XPtrTorchTensor values, XPtrTorchoptional_bool is_coalesced) { + auto r_out = 
lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor_bool(out.get(), sparse_dim.get(), dense_dim.get(), size.get(), indices.get(), values.get(), is_coalesced.get()); return XPtrTorchTensor(r_out); } @@ -19653,8 +20450,14 @@ return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype) { - auto r_out = lantern__to_dense_out_tensor_tensor_scalartype(out.get(), self.get(), dtype.get()); +XPtrTorchTensor cpp_torch_namespace__sparse_mask_projection_out_out_Tensor_self_Tensor_mask_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchTensor mask, XPtrTorchbool accumulate_matches) { + auto r_out = lantern__sparse_mask_projection_out_tensor_tensor_tensor_bool(out.get(), self.get(), mask.get(), accumulate_matches.get()); +return XPtrTorchTensor(r_out); +} + +// [[Rcpp::export(rng=false)]] +XPtrTorchTensor cpp_torch_namespace__to_dense_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchoptional_scalar_type dtype, XPtrTorchoptional_bool masked_grad) { + auto r_out = lantern__to_dense_out_tensor_tensor_scalartype_bool(out.get(), self.get(), dtype.get(), masked_grad.get()); return XPtrTorchTensor(r_out); } @@ -19689,38 +20492,38 @@ return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchint64_t sparse_dim) { - auto r_out = lantern_to_sparse_out_tensor_tensor_intt(out.get(), self.get(), sparse_dim.get()); +XPtrTorchTensor cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor_sparse_dim_int64_t (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchint64_t sparse_dim) { + auto r_out = lantern__to_sparse_out_tensor_tensor_intt(out.get(), self.get(), sparse_dim.get()); return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace_to_sparse_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchLayout layout, XPtrTorchOptionalIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim) { - auto r_out = lantern_to_sparse_out_tensor_tensor_layout_intarrayref_intt(out.get(), self.get(), layout.get(), blocksize.get(), dense_dim.get()); +XPtrTorchTensor cpp_torch_namespace__to_sparse_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchLayout layout, XPtrTorchOptionalIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim) { + auto r_out = lantern__to_sparse_out_tensor_tensor_layout_intarrayref_intt(out.get(), self.get(), layout.get(), blocksize.get(), dense_dim.get()); return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace_to_sparse_csr_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim) { - auto r_out = lantern_to_sparse_csr_out_tensor_tensor_intt(out.get(), self.get(), dense_dim.get()); +XPtrTorchTensor cpp_torch_namespace__to_sparse_csr_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim) { + auto r_out = lantern__to_sparse_csr_out_tensor_tensor_intt(out.get(), self.get(), dense_dim.get()); return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace_to_sparse_csc_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, 
XPtrTorchoptional_int64_t dense_dim) { - auto r_out = lantern_to_sparse_csc_out_tensor_tensor_intt(out.get(), self.get(), dense_dim.get()); +XPtrTorchTensor cpp_torch_namespace__to_sparse_csc_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchoptional_int64_t dense_dim) { + auto r_out = lantern__to_sparse_csc_out_tensor_tensor_intt(out.get(), self.get(), dense_dim.get()); return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace_to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim) { - auto r_out = lantern_to_sparse_bsr_out_tensor_tensor_intarrayref_intt(out.get(), self.get(), blocksize.get(), dense_dim.get()); +XPtrTorchTensor cpp_torch_namespace__to_sparse_bsr_out_out_Tensor_self_Tensor_blocksize_IntArrayRef (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim) { + auto r_out = lantern__to_sparse_bsr_out_tensor_tensor_intarrayref_intt(out.get(), self.get(), blocksize.get(), dense_dim.get()); return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace_to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim) { - auto r_out = lantern_to_sparse_bsc_out_tensor_tensor_intarrayref_intt(out.get(), self.get(), blocksize.get(), dense_dim.get()); +XPtrTorchTensor cpp_torch_namespace__to_sparse_bsc_out_out_Tensor_self_Tensor_blocksize_IntArrayRef (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef blocksize, XPtrTorchoptional_int64_t dense_dim) { + auto r_out = lantern__to_sparse_bsc_out_tensor_tensor_intarrayref_intt(out.get(), self.get(), blocksize.get(), dense_dim.get()); return XPtrTorchTensor(r_out); } @@ -19737,8 +20540,8 @@ return XPtrTorchTensor(r_out); } // [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups) { - auto r_out = lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt(out.get(), self.get(), padding.get(), stride.get(), dilation.get(), groups.get()); +XPtrTorchTensor cpp_torch_namespace_mkldnn_reorder_conv3d_weight_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchIntArrayRef padding, XPtrTorchIntArrayRef stride, XPtrTorchIntArrayRef dilation, XPtrTorchint64_t groups, XPtrTorchOptionalIntArrayRef input_size) { + auto r_out = lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(out.get(), self.get(), padding.get(), stride.get(), dilation.get(), groups.get(), input_size.get()); return XPtrTorchTensor(r_out); } @@ -19873,7 +20676,7 @@ return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPt } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace_lstm_mps_backward_out_out0_Tensor_out1_TensorList_out2_TensorList_grad_y_Tensor_grad_hy_Tensor_grad_cy_Tensor_z_state_Tensor_cell_state_fwd_Tensor_input_Tensor_layersOutputs_Tensor_hx_TensorList_params_TensorList_has_biases_bool_num_layers_int64_t_dropout_double_train_bool_bidirectional_bool_batch_first_bool (XPtrTorchTensor out0, XPtrTorchTensorList 
out1, XPtrTorchTensorList out2, XPtrTorchTensor grad_y, XPtrTorchOptionalTensor grad_hy, XPtrTorchOptionalTensor grad_cy, XPtrTorchTensor z_state, XPtrTorchTensor cell_state_fwd, XPtrTorchTensor input, XPtrTorchTensor layersOutputs, XPtrTorchTensorList hx, XPtrTorchTensorList params, XPtrTorchbool has_biases, XPtrTorchint64_t num_layers, XPtrTorchdouble dropout, XPtrTorchbool train, XPtrTorchbool bidirectional, XPtrTorchbool batch_first) { +void cpp_torch_namespace_lstm_mps_backward_out_out0_Tensor_out1_TensorList_out2_TensorList_grad_y_Tensor_grad_hy_Tensor_grad_cy_Tensor_z_state_Tensor_cell_state_fwd_Tensor_input_Tensor_layersOutputs_Tensor_hx_TensorList_params_TensorList_has_biases_bool_num_layers_int64_t_dropout_double_train_bool_bidirectional_bool_batch_first_bool (XPtrTorchTensor out0, XPtrTorchTensorList out1, XPtrTorchTensorList out2, XPtrTorchOptionalTensor grad_y, XPtrTorchOptionalTensor grad_hy, XPtrTorchOptionalTensor grad_cy, XPtrTorchTensor z_state, XPtrTorchTensor cell_state_fwd, XPtrTorchTensor input, XPtrTorchTensor layersOutputs, XPtrTorchTensorList hx, XPtrTorchTensorList params, XPtrTorchbool has_biases, XPtrTorchint64_t num_layers, XPtrTorchdouble dropout, XPtrTorchbool train, XPtrTorchbool bidirectional, XPtrTorchbool batch_first) { lantern_lstm_mps_backward_out_tensor_tensorlist_tensorlist_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensorlist_tensorlist_bool_intt_double_bool_bool_bool(out0.get(), out1.get(), out2.get(), grad_y.get(), grad_hy.get(), grad_cy.get(), z_state.get(), cell_state_fwd.get(), input.get(), layersOutputs.get(), hx.get(), params.get(), has_biases.get(), num_layers.get(), dropout.get(), train.get(), bidirectional.get(), batch_first.get()); } @@ -20223,12 +21026,6 @@ XPtrTorchTensor cpp_torch_namespace_remainder_out_out_Tensor_self_Scalar_other_T return XPtrTorchTensor(r_out); } -// [[Rcpp::export(rng=false)]] -XPtrTorchTensor cpp_torch_namespace_argsort_out_out_Tensor_self_Tensor_stable_bool (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchbool stable, XPtrTorchindex_int64_t dim, XPtrTorchbool descending) { - auto r_out = lantern_argsort_out_tensor_tensor_bool_intt_bool(out.get(), self.get(), stable.get(), dim.get(), descending.get()); -return XPtrTorchTensor(r_out); -} - // [[Rcpp::export(rng=false)]] XPtrTorchTensor cpp_torch_namespace_unfold_backward_out_out_Tensor_grad_in_Tensor_input_sizes_IntArrayRef_dim_int64_t_size_int64_t_step_int64_t (XPtrTorchTensor out, XPtrTorchTensor grad_in, XPtrTorchIntArrayRef input_sizes, XPtrTorchindex_int64_t dim, XPtrTorchint64_t size, XPtrTorchint64_t step) { auto r_out = lantern_unfold_backward_out_tensor_tensor_intarrayref_intt_intt_intt(out.get(), grad_in.get(), input_sizes.get(), dim.get(), size.get(), step.get()); @@ -20272,98 +21069,98 @@ void cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalar_ } // [[Rcpp::export(rng=false)]] -void cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar) { - lantern__foreach_sub_out_tensorlist_tensorlist_scalar(out.get(), self.get(), scalar.get()); +void cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha) { + lantern__foreach_add_out_tensorlist_tensorlist_tensorlist_scalar(out.get(), self.get(), other.get(), alpha.get()); } // [[Rcpp::export(rng=false)]] -void 
// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar) {
- lantern__foreach_mul_out_tensorlist_tensorlist_scalar(out.get(), self.get(), scalar.get());
+void cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
+ lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), scalars.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar) {
- lantern__foreach_div_out_tensorlist_tensorlist_scalar(out.get(), self.get(), scalar.get());
+void cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_Tensor (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensor other, XPtrTorchScalar alpha) {
+ lantern__foreach_add_out_tensorlist_tensorlist_tensor_scalar(out.get(), self.get(), other.get(), alpha.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar) {
- lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar(out.get(), self.get(), scalar.get());
+void cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalar_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar) {
+ lantern__foreach_sub_out_tensorlist_tensorlist_scalar(out.get(), self.get(), scalar.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar) {
- lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar(out.get(), self.get(), scalar.get());
+void cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha) {
+ lantern__foreach_sub_out_tensorlist_tensorlist_tensorlist_scalar(out.get(), self.get(), other.get(), alpha.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar) {
- lantern__foreach_maximum_out_tensorlist_tensorlist_scalar(out.get(), self.get(), scalar.get());
+void cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
+ lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), scalars.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar) {
- lantern__foreach_minimum_out_tensorlist_tensorlist_scalar(out.get(), self.get(), scalar.get());
+void cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalar_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar) {
+ lantern__foreach_mul_out_tensorlist_tensorlist_scalar(out.get(), self.get(), scalar.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha) {
- lantern__foreach_add_out_tensorlist_tensorlist_tensorlist_scalar(out.get(), self.get(), other.get(), alpha.get());
+void cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other) {
+ lantern__foreach_mul_out_tensorlist_tensorlist_tensorlist(out.get(), self.get(), other.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other, XPtrTorchScalar alpha) {
- lantern__foreach_sub_out_tensorlist_tensorlist_tensorlist_scalar(out.get(), self.get(), other.get(), alpha.get());
+void cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
+ lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), scalars.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other) {
- lantern__foreach_mul_out_tensorlist_tensorlist_tensorlist(out.get(), self.get(), other.get());
+void cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_other_Tensor (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensor other) {
+ lantern__foreach_mul_out_tensorlist_tensorlist_tensor(out.get(), self.get(), other.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other) {
- lantern__foreach_div_out_tensorlist_tensorlist_tensorlist(out.get(), self.get(), other.get());
+void cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalar_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar) {
+ lantern__foreach_div_out_tensorlist_tensorlist_scalar(out.get(), self.get(), scalar.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other) {
- lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist(out.get(), self.get(), other.get());
+void cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other) {
+ lantern__foreach_div_out_tensorlist_tensorlist_tensorlist(out.get(), self.get(), other.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other) {
- lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist(out.get(), self.get(), other.get());
+void cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
+ lantern__foreach_div_out_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), scalars.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other) {
- lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist(out.get(), self.get(), other.get());
+void cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_other_Tensor (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensor other) {
+ lantern__foreach_div_out_tensorlist_tensorlist_tensor(out.get(), self.get(), other.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other) {
- lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist(out.get(), self.get(), other.get());
+void cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalar_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar) {
+ lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar(out.get(), self.get(), scalar.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_add_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
- lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), scalars.get());
+void cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other) {
+ lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist(out.get(), self.get(), other.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_sub_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
- lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), scalars.get());
+void cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
+ lantern__foreach_clamp_max_out_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), scalars.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_div_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
- lantern__foreach_div_out_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), scalars.get());
+void cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_scalar_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar) {
+ lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar(out.get(), self.get(), scalar.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_mul_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
- lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), scalars.get());
+void cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other) {
+ lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist(out.get(), self.get(), other.get());
}

// [[Rcpp::export(rng=false)]]
@@ -20372,8 +21169,13 @@ void cpp_torch_namespace__foreach_clamp_min_out_out_TensorList_self_TensorList_s
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_clamp_max_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
- lantern__foreach_clamp_max_out_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), scalars.get());
+void cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_scalar_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar) {
+ lantern__foreach_maximum_out_tensorlist_tensorlist_scalar(out.get(), self.get(), scalar.get());
+}
+
+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other) {
+ lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist(out.get(), self.get(), other.get());
}

// [[Rcpp::export(rng=false)]]
@@ -20381,30 +21183,49 @@ void cpp_torch_namespace__foreach_maximum_out_out_TensorList_self_TensorList_sca
 lantern__foreach_maximum_out_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), scalars.get());
}

+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalar_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar scalar) {
+ lantern__foreach_minimum_out_tensorlist_tensorlist_scalar(out.get(), self.get(), scalar.get());
+}
+
+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_other_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList other) {
+ lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist(out.get(), self.get(), other.get());
+}
+
// [[Rcpp::export(rng=false)]]
void cpp_torch_namespace__foreach_minimum_out_out_TensorList_self_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar scalars) {
 lantern__foreach_minimum_out_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), scalars.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
- lantern__foreach_exp_out_tensorlist_tensorlist(out.get(), self.get());
+void cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value) {
+ lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(out.get(), self.get(), tensor1.get(), tensor2.get(), value.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
- lantern__foreach_zero_out_tensorlist_tensorlist(out.get(), self.get());
+void cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars) {
+ lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), tensor1.get(), tensor2.get(), scalars.get());
}
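Re-ordered alphabetically, these remain single-call shims; what varies across overloads is only the broadcast pattern: scalar_Scalar applies one scalar to every list slot, scalars_ArrayRefScalar supplies one scalar per slot, and other_TensorList pairs two lists slot-by-slot. A plain-C++ analogy of the paired-list case (assuming the usual ATen _foreach_ semantics; this is not code from the package):

#include <algorithm>
#include <cstddef>
#include <vector>

// Analogy over plain vectors: a _foreach_maximum out-variant computes
// out[i] = max(self[i], other[i]) element-wise, repeated for every
// paired "tensor" in the two lists.
void foreach_maximum_out(std::vector<std::vector<double>>& out,
                         const std::vector<std::vector<double>>& self,
                         const std::vector<std::vector<double>>& other) {
  for (std::size_t i = 0; i < self.size(); ++i) {
    out[i].resize(self[i].size());
    std::transform(self[i].begin(), self[i].end(), other[i].begin(),
                   out[i].begin(),
                   [](double a, double b) { return std::max(a, b); });
  }
}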
// [[Rcpp::export(rng=false)]]
-XPtrTorchTensorList cpp_torch_namespace__foreach_zero_self_TensorList (XPtrTorchTensorList self) {
- auto r_out = lantern__foreach_zero_tensorlist(self.get());
-return XPtrTorchTensorList(r_out);
+void cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars) {
+ lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(out.get(), self.get(), tensor1.get(), tensor2.get(), scalars.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
- lantern__foreach_sqrt_out_tensorlist_tensorlist(out.get(), self.get());
+void cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value) {
+ lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(out.get(), self.get(), tensor1.get(), tensor2.get(), value.get());
+}
+
+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars) {
+ lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), tensor1.get(), tensor2.get(), scalars.get());
+}
+
+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars) {
+ lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(out.get(), self.get(), tensor1.get(), tensor2.get(), scalars.get());
}

// [[Rcpp::export(rng=false)]]
@@ -20452,6 +21273,11 @@ void cpp_torch_namespace__foreach_erfc_out_out_TensorList_self_TensorList (XPtrT
 lantern__foreach_erfc_out_tensorlist_tensorlist(out.get(), self.get());
}

+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__foreach_exp_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_exp_out_tensorlist_tensorlist(out.get(), self.get());
+}
+
// [[Rcpp::export(rng=false)]]
void cpp_torch_namespace__foreach_expm1_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
 lantern__foreach_expm1_out_tensorlist_tensorlist(out.get(), self.get());
@@ -20462,6 +21288,26 @@ void cpp_torch_namespace__foreach_floor_out_out_TensorList_self_TensorList (XPtr
 lantern__foreach_floor_out_tensorlist_tensorlist(out.get(), self.get());
}

+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_frac_out_tensorlist_tensorlist(out.get(), self.get());
+}
+
+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchTensorList weights) {
+ lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist(out.get(), self.get(), tensors1.get(), weights.get());
+}
+
+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchScalar weight) {
+ lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar(out.get(), self.get(), tensors1.get(), weight.get());
+}
+
+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_lgamma_out_tensorlist_tensorlist(out.get(), self.get());
+}
+
// [[Rcpp::export(rng=false)]]
void cpp_torch_namespace__foreach_log_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
 lantern__foreach_log_out_tensorlist_tensorlist(out.get(), self.get());
@@ -20483,48 +21329,43 @@ void cpp_torch_namespace__foreach_log2_out_out_TensorList_self_TensorList (XPtrT
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
- lantern__foreach_neg_out_tensorlist_tensorlist(out.get(), self.get());
-}
-
-// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
- lantern__foreach_tan_out_tensorlist_tensorlist(out.get(), self.get());
+void cpp_torch_namespace__foreach_max_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_max_out_tensorlist_tensorlist(out.get(), self.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
- lantern__foreach_tanh_out_tensorlist_tensorlist(out.get(), self.get());
+void cpp_torch_namespace__foreach_neg_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_neg_out_tensorlist_tensorlist(out.get(), self.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
- lantern__foreach_sin_out_tensorlist_tensorlist(out.get(), self.get());
+void cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar ord, XPtrTorchoptional_scalar_type dtype) {
+ lantern__foreach_norm_out_tensorlist_tensorlist_scalar_scalartype(out.get(), self.get(), ord.get(), dtype.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
- lantern__foreach_sinh_out_tensorlist_tensorlist(out.get(), self.get());
+void cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList exponent) {
+ lantern__foreach_pow_out_tensorlist_tensorlist_tensorlist(out.get(), self.get(), exponent.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
- lantern__foreach_round_out_tensorlist_tensorlist(out.get(), self.get());
+void cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar exponent) {
+ lantern__foreach_pow_out_tensorlist_tensorlist_scalar(out.get(), self.get(), exponent.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_lgamma_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
- lantern__foreach_lgamma_out_tensorlist_tensorlist(out.get(), self.get());
+void cpp_torch_namespace__foreach_pow_out_out_TensorList_self_TensorList_exponent_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchvector_Scalar exponent) {
+ lantern__foreach_pow_out_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), exponent.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_frac_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
- lantern__foreach_frac_out_tensorlist_tensorlist(out.get(), self.get());
+void cpp_torch_namespace__foreach_reciprocal_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_reciprocal_out_tensorlist_tensorlist(out.get(), self.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_reciprocal_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
- lantern__foreach_reciprocal_out_tensorlist_tensorlist(out.get(), self.get());
+void cpp_torch_namespace__foreach_round_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_round_out_tensorlist_tensorlist(out.get(), self.get());
}

// [[Rcpp::export(rng=false)]]
@@ -20533,53 +21374,54 @@ void cpp_torch_namespace__foreach_sigmoid_out_out_TensorList_self_TensorList (XP
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
- lantern__foreach_trunc_out_tensorlist_tensorlist(out.get(), self.get());
+void cpp_torch_namespace__foreach_sign_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_sign_out_tensorlist_tensorlist(out.get(), self.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value) {
- lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(out.get(), self.get(), tensor1.get(), tensor2.get(), value.get());
+void cpp_torch_namespace__foreach_sin_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_sin_out_tensorlist_tensorlist(out.get(), self.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchScalar value) {
- lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(out.get(), self.get(), tensor1.get(), tensor2.get(), value.get());
+void cpp_torch_namespace__foreach_sinh_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_sinh_out_tensorlist_tensorlist(out.get(), self.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars) {
- lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), tensor1.get(), tensor2.get(), scalars.get());
+void cpp_torch_namespace__foreach_sqrt_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_sqrt_out_tensorlist_tensorlist(out.get(), self.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_addcdiv_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars) {
- lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(out.get(), self.get(), tensor1.get(), tensor2.get(), scalars.get());
+void cpp_torch_namespace__foreach_tan_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_tan_out_tensorlist_tensorlist(out.get(), self.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_ArrayRefScalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchvector_Scalar scalars) {
- lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(out.get(), self.get(), tensor1.get(), tensor2.get(), scalars.get());
+void cpp_torch_namespace__foreach_tanh_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_tanh_out_tensorlist_tensorlist(out.get(), self.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_addcmul_out_out_TensorList_self_TensorList_tensor1_TensorList_tensor2_TensorList_scalars_Tensor (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensor1, XPtrTorchTensorList tensor2, XPtrTorchTensor scalars) {
- lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(out.get(), self.get(), tensor1.get(), tensor2.get(), scalars.get());
+void cpp_torch_namespace__foreach_trunc_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_trunc_out_tensorlist_tensorlist(out.get(), self.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_norm_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchScalar ord) {
- lantern__foreach_norm_out_tensorlist_tensorlist_scalar(out.get(), self.get(), ord.get());
+void cpp_torch_namespace__foreach_zero_out_out_TensorList_self_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self) {
+ lantern__foreach_zero_out_tensorlist_tensorlist(out.get(), self.get());
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weights_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchTensorList weights) {
- lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist(out.get(), self.get(), tensors1.get(), weights.get());
+XPtrTorchTensorList cpp_torch_namespace__foreach_zero_self_TensorList (XPtrTorchTensorList self) {
+ auto r_out = lantern__foreach_zero_tensorlist(self.get());
+return XPtrTorchTensorList(r_out);
}

// [[Rcpp::export(rng=false)]]
-void cpp_torch_namespace__foreach_lerp_out_out_TensorList_self_TensorList_tensors1_TensorList_weight_Scalar (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList tensors1, XPtrTorchScalar weight) {
- lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar(out.get(), self.get(), tensors1.get(), weight.get());
+void cpp_torch_namespace__foreach_copy_out_out_TensorList_self_TensorList_src_TensorList (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList src, XPtrTorchbool non_blocking) {
+ lantern__foreach_copy_out_tensorlist_tensorlist_tensorlist_bool(out.get(), self.get(), src.get(), non_blocking.get());
}

// [[Rcpp::export(rng=false)]]
@@ -20588,12 +21430,6 @@ XPtrTorchTensor cpp_torch_namespace_bucketize_out_out_Tensor_self_Scalar_boundar
return XPtrTorchTensor(r_out);
}

-// [[Rcpp::export(rng=false)]]
-XPtrTorchTensor cpp_torch_namespace_searchsorted_out_out_Tensor_sorted_sequence_Tensor_self_Scalar (XPtrTorchTensor out, XPtrTorchTensor sorted_sequence, XPtrTorchScalar self, XPtrTorchbool out_int32, XPtrTorchbool right, XPtrTorchoptional_string_view side, XPtrTorchOptionalTensor sorter) {
- auto r_out = lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor(out.get(), sorted_sequence.get(), self.get(), out_int32.get(), right.get(), side.get(), sorter.get());
-return XPtrTorchTensor(r_out);
-}
-
// [[Rcpp::export(rng=false)]]
XPtrTorchTensor cpp_torch_namespace_glu_jvp_out_out_Tensor_glu_Tensor_x_Tensor_dx_Tensor_dim_int64_t (XPtrTorchTensor out, XPtrTorchTensor glu, XPtrTorchTensor x, XPtrTorchTensor dx, XPtrTorchindex_int64_t dim) {
auto r_out = lantern_glu_jvp_out_tensor_tensor_tensor_tensor_intt(out.get(), glu.get(), x.get(), dx.get(), dim.get());

@@ -20968,20 +21804,6 @@ XPtrTorchTensor cpp_torch_namespace__triton_multi_head_attention_out_out_Tensor_
return XPtrTorchTensor(r_out);
}
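Among the bindings added above, the _foreach_addcdiv/_foreach_addcmul out-variants fuse a scaled multiply-divide (or multiply-multiply) accumulate across whole tensor lists. As a plain-C++ analogy, assuming the usual ATen semantics (this is not code from the package):

#include <cstddef>
#include <vector>

// out[i] = self[i] + value * (tensor1[i] / tensor2[i]), element-wise,
// for every slot i of the lists; the scalars_ArrayRefScalar and
// scalars_Tensor overloads simply replace the shared `value` with one
// scalar per list slot.
void foreach_addcdiv_out(std::vector<std::vector<double>>& out,
                         const std::vector<std::vector<double>>& self,
                         const std::vector<std::vector<double>>& tensor1,
                         const std::vector<std::vector<double>>& tensor2,
                         double value) {
  for (std::size_t i = 0; i < self.size(); ++i) {
    out[i].resize(self[i].size());
    for (std::size_t j = 0; j < self[i].size(); ++j)
      out[i][j] = self[i][j] + value * (tensor1[i][j] / tensor2[i][j]);
  }
}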
-// [[Rcpp::export(rng=false)]]
-Rcpp::List cpp_torch_namespace__transformer_decoder_only_layer_fwd_out_out0_Tensor_out1_Tensor_out2_Tensor_src_Tensor_embed_dim_int64_t_num_heads_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor_use_gelu_bool_norm_first_bool_eps_double_norm_weight_1_Tensor_norm_bias_1_Tensor_norm_weight_2_Tensor_norm_bias_2_Tensor_ffn_weight_1_Tensor_ffn_bias_1_Tensor_ffn_weight_2_Tensor_ffn_bias_2_Tensor (XPtrTorchTensor out0, XPtrTorchTensor out1, XPtrTorchTensor out2, XPtrTorchTensor src, XPtrTorchint64_t embed_dim, XPtrTorchint64_t num_heads, XPtrTorchTensor qkv_weight, XPtrTorchTensor qkv_bias, XPtrTorchTensor proj_weight, XPtrTorchTensor proj_bias, XPtrTorchbool use_gelu, XPtrTorchbool norm_first, XPtrTorchdouble eps, XPtrTorchTensor norm_weight_1, XPtrTorchTensor norm_bias_1, XPtrTorchTensor norm_weight_2, XPtrTorchTensor norm_bias_2, XPtrTorchTensor ffn_weight_1, XPtrTorchTensor ffn_bias_1, XPtrTorchTensor ffn_weight_2, XPtrTorchTensor ffn_bias_2, XPtrTorchOptionalTensor mask, XPtrTorchOptionalTensor incr_key, XPtrTorchOptionalTensor incr_value) {
- auto r_out = lantern__transformer_decoder_only_layer_fwd_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(out0.get(), out1.get(), out2.get(), src.get(), embed_dim.get(), num_heads.get(), qkv_weight.get(), qkv_bias.get(), proj_weight.get(), proj_bias.get(), use_gelu.get(), norm_first.get(), eps.get(), norm_weight_1.get(), norm_bias_1.get(), norm_weight_2.get(), norm_bias_2.get(), ffn_weight_1.get(), ffn_bias_1.get(), ffn_weight_2.get(), ffn_bias_2.get(), mask.get(), incr_key.get(), incr_value.get());
-auto wrap = XPtrTorchvector_void(r_out);
-return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)));
-}
-
-// [[Rcpp::export(rng=false)]]
-Rcpp::List cpp_torch_namespace__native_decoder_only_multi_head_attention_out_out0_Tensor_out1_Tensor_out2_Tensor_out3_Tensor_query_Tensor_key_Tensor_value_Tensor_embed_dim_int64_t_num_head_int64_t_qkv_weight_Tensor_qkv_bias_Tensor_proj_weight_Tensor_proj_bias_Tensor (XPtrTorchTensor out0, XPtrTorchTensor out1, XPtrTorchTensor out2, XPtrTorchTensor out3, XPtrTorchTensor query, XPtrTorchTensor key, XPtrTorchTensor value, XPtrTorchint64_t embed_dim, XPtrTorchint64_t num_head, XPtrTorchTensor qkv_weight, XPtrTorchTensor qkv_bias, XPtrTorchTensor proj_weight, XPtrTorchTensor proj_bias, XPtrTorchOptionalTensor mask, XPtrTorchOptionalTensor incr_key, XPtrTorchOptionalTensor incr_value, XPtrTorchbool need_weights, XPtrTorchbool average_attn_weights) {
- auto r_out = lantern__native_decoder_only_multi_head_attention_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(out0.get(), out1.get(), out2.get(), out3.get(), query.get(), key.get(), value.get(), embed_dim.get(), num_head.get(), qkv_weight.get(), qkv_bias.get(), proj_weight.get(), proj_bias.get(), mask.get(), incr_key.get(), incr_value.get(), need_weights.get(), average_attn_weights.get());
-auto wrap = XPtrTorchvector_void(r_out);
-return Rcpp::List::create(XPtrTorchTensor(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensor(lantern_vector_get(wrap.get(), 3)));
-}
-
// [[Rcpp::export(rng=false)]]
XPtrTorchTensor cpp_torch_namespace__foobar_out_out_Tensor_self_Tensor (XPtrTorchTensor out, XPtrTorchTensor self, XPtrTorchbool arg1, XPtrTorchbool arg2, XPtrTorchbool arg3) {
auto r_out = lantern__foobar_out_tensor_tensor_bool_bool_bool(out.get(), self.get(), arg1.get(), arg2.get(), arg3.get());

@@ -21000,6 +21822,18 @@ auto wrap = XPtrTorchvector_void(r_out);
return Rcpp::List::create(XPtrTorchTensorList(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 3)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 4)));
}

+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__fused_adam_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchTensor lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) {
+ lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(out.get(), self.get(), grads.get(), exp_avgs.get(), exp_avg_sqs.get(), max_exp_avg_sqs.get(), state_steps.get(), lr.get(), beta1.get(), beta2.get(), weight_decay.get(), eps.get(), amsgrad.get(), maximize.get(), grad_scale.get(), found_inf.get());
+}
+
+// [[Rcpp::export(rng=false)]]
+Rcpp::List cpp_torch_namespace__fused_adam_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool (XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchTensor lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) {
+ auto r_out = lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(self.get(), grads.get(), exp_avgs.get(), exp_avg_sqs.get(), max_exp_avg_sqs.get(), state_steps.get(), lr.get(), beta1.get(), beta2.get(), weight_decay.get(), eps.get(), amsgrad.get(), maximize.get(), grad_scale.get(), found_inf.get());
+auto wrap = XPtrTorchvector_void(r_out);
+return Rcpp::List::create(XPtrTorchTensorList(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 3)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 4)));
+}
+
// [[Rcpp::export(rng=false)]]
void cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_double_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchdouble lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) {
lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(out.get(), self.get(), grads.get(), exp_avgs.get(), exp_avg_sqs.get(), max_exp_avg_sqs.get(), state_steps.get(), lr.get(), beta1.get(), beta2.get(), weight_decay.get(), eps.get(), amsgrad.get(), maximize.get(), grad_scale.get(), found_inf.get());

@@ -21012,3 +21846,51 @@ auto wrap = XPtrTorchvector_void(r_out);
return Rcpp::List::create(XPtrTorchTensorList(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 3)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 4)));
}

+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__fused_adamw_out_out_TensorList_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchTensor lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) {
+ lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(out.get(), self.get(), grads.get(), exp_avgs.get(), exp_avg_sqs.get(), max_exp_avg_sqs.get(), state_steps.get(), lr.get(), beta1.get(), beta2.get(), weight_decay.get(), eps.get(), amsgrad.get(), maximize.get(), grad_scale.get(), found_inf.get());
+}
+
+// [[Rcpp::export(rng=false)]]
+Rcpp::List cpp_torch_namespace__fused_adamw_self_TensorList_grads_TensorList_exp_avgs_TensorList_exp_avg_sqs_TensorList_max_exp_avg_sqs_TensorList_state_steps_TensorList_lr_Tensor_beta1_double_beta2_double_weight_decay_double_eps_double_amsgrad_bool_maximize_bool (XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList exp_avgs, XPtrTorchTensorList exp_avg_sqs, XPtrTorchTensorList max_exp_avg_sqs, XPtrTorchTensorList state_steps, XPtrTorchTensor lr, XPtrTorchdouble beta1, XPtrTorchdouble beta2, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool amsgrad, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) {
+ auto r_out = lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(self.get(), grads.get(), exp_avgs.get(), exp_avg_sqs.get(), max_exp_avg_sqs.get(), state_steps.get(), lr.get(), beta1.get(), beta2.get(), weight_decay.get(), eps.get(), amsgrad.get(), maximize.get(), grad_scale.get(), found_inf.get());
+auto wrap = XPtrTorchvector_void(r_out);
+return Rcpp::List::create(XPtrTorchTensorList(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 3)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 4)));
+}
+
+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList momentum_buffer_list, XPtrTorchdouble weight_decay, XPtrTorchdouble momentum, XPtrTorchdouble lr, XPtrTorchdouble dampening, XPtrTorchbool nesterov, XPtrTorchbool maximize, XPtrTorchbool is_first_step, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) {
+ lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(out.get(), self.get(), grads.get(), momentum_buffer_list.get(), weight_decay.get(), momentum.get(), lr.get(), dampening.get(), nesterov.get(), maximize.get(), is_first_step.get(), grad_scale.get(), found_inf.get());
+}
+
+// [[Rcpp::export(rng=false)]]
+Rcpp::List cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_double_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool (XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList momentum_buffer_list, XPtrTorchdouble weight_decay, XPtrTorchdouble momentum, XPtrTorchdouble lr, XPtrTorchdouble dampening, XPtrTorchbool nesterov, XPtrTorchbool maximize, XPtrTorchbool is_first_step, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) {
+ auto r_out = lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(self.get(), grads.get(), momentum_buffer_list.get(), weight_decay.get(), momentum.get(), lr.get(), dampening.get(), nesterov.get(), maximize.get(), is_first_step.get(), grad_scale.get(), found_inf.get());
+auto wrap = XPtrTorchvector_void(r_out);
+return Rcpp::List::create(XPtrTorchTensorList(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 2)));
+}
+
+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__fused_sgd_out_out_TensorList_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList momentum_buffer_list, XPtrTorchdouble weight_decay, XPtrTorchdouble momentum, XPtrTorchTensor lr, XPtrTorchdouble dampening, XPtrTorchbool nesterov, XPtrTorchbool maximize, XPtrTorchbool is_first_step, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) {
+ lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(out.get(), self.get(), grads.get(), momentum_buffer_list.get(), weight_decay.get(), momentum.get(), lr.get(), dampening.get(), nesterov.get(), maximize.get(), is_first_step.get(), grad_scale.get(), found_inf.get());
+}
+
+// [[Rcpp::export(rng=false)]]
+Rcpp::List cpp_torch_namespace__fused_sgd_self_TensorList_grads_TensorList_momentum_buffer_list_TensorList_weight_decay_double_momentum_double_lr_Tensor_dampening_double_nesterov_bool_maximize_bool_is_first_step_bool (XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList momentum_buffer_list, XPtrTorchdouble weight_decay, XPtrTorchdouble momentum, XPtrTorchTensor lr, XPtrTorchdouble dampening, XPtrTorchbool nesterov, XPtrTorchbool maximize, XPtrTorchbool is_first_step, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) {
+ auto r_out = lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(self.get(), grads.get(), momentum_buffer_list.get(), weight_decay.get(), momentum.get(), lr.get(), dampening.get(), nesterov.get(), maximize.get(), is_first_step.get(), grad_scale.get(), found_inf.get());
+auto wrap = XPtrTorchvector_void(r_out);
+return Rcpp::List::create(XPtrTorchTensorList(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 2)));
+}
+
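The _fused_adam/_fused_adamw entry points thread the optimizer state through as tensor lists (exp_avgs and exp_avg_sqs are the first/second-moment buffers, state_steps the per-parameter step counts), and the new overloads also accept lr as a Tensor rather than a double; my reading, not stated in the diff, is that this mirrors upstream's "capturable" fused optimizers, where a device-resident learning rate avoids host syncs. For reference, the update each parameter undergoes, written out in plain C++ for a single tensor (classic Adam; AdamW instead decays the weights directly rather than folding decay into the gradient):

#include <cmath>
#include <cstddef>
#include <vector>

// One Adam step for a single parameter; the fused kernels apply this
// across the whole TensorList in one launch.
void adam_step(std::vector<double>& param, const std::vector<double>& grad,
               std::vector<double>& exp_avg, std::vector<double>& exp_avg_sq,
               long step, double lr, double beta1, double beta2,
               double weight_decay, double eps) {
  const double bc1 = 1.0 - std::pow(beta1, step);  // bias corrections
  const double bc2 = 1.0 - std::pow(beta2, step);
  for (std::size_t i = 0; i < param.size(); ++i) {
    double g = grad[i] + weight_decay * param[i];
    exp_avg[i]    = beta1 * exp_avg[i]    + (1.0 - beta1) * g;
    exp_avg_sq[i] = beta2 * exp_avg_sq[i] + (1.0 - beta2) * g * g;
    param[i] -= lr * (exp_avg[i] / bc1) / (std::sqrt(exp_avg_sq[i] / bc2) + eps);
  }
}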
+// [[Rcpp::export(rng=false)]]
+void cpp_torch_namespace__fused_adagrad_out_out_TensorList_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool (XPtrTorchTensorList out, XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList state_sums, XPtrTorchTensorList state_steps, XPtrTorchdouble lr, XPtrTorchdouble lr_decay, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) {
+ lantern__fused_adagrad_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(out.get(), self.get(), grads.get(), state_sums.get(), state_steps.get(), lr.get(), lr_decay.get(), weight_decay.get(), eps.get(), maximize.get(), grad_scale.get(), found_inf.get());
+}
+
+// [[Rcpp::export(rng=false)]]
+Rcpp::List cpp_torch_namespace__fused_adagrad_self_TensorList_grads_TensorList_state_sums_TensorList_state_steps_TensorList_lr_double_lr_decay_double_weight_decay_double_eps_double_maximize_bool (XPtrTorchTensorList self, XPtrTorchTensorList grads, XPtrTorchTensorList state_sums, XPtrTorchTensorList state_steps, XPtrTorchdouble lr, XPtrTorchdouble lr_decay, XPtrTorchdouble weight_decay, XPtrTorchdouble eps, XPtrTorchbool maximize, XPtrTorchOptionalTensor grad_scale, XPtrTorchOptionalTensor found_inf) {
+ auto r_out = lantern__fused_adagrad_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(self.get(), grads.get(), state_sums.get(), state_steps.get(), lr.get(), lr_decay.get(), weight_decay.get(), eps.get(), maximize.get(), grad_scale.get(), found_inf.get());
+auto wrap = XPtrTorchvector_void(r_out);
+return Rcpp::List::create(XPtrTorchTensorList(lantern_vector_get(wrap.get(), 0)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 1)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 2)),XPtrTorchTensorList(lantern_vector_get(wrap.get(), 3)));
+}
+
diff --git a/src/lantern/CMakeLists.txt b/src/lantern/CMakeLists.txt
index f9c79e8de0..25539198fb 100644
--- a/src/lantern/CMakeLists.txt
+++ b/src/lantern/CMakeLists.txt
@@ -1,7 +1,7 @@
cmake_minimum_required(VERSION 3.19.2)
project(lantern)

-set(TORCH_VERSION "2.0.1")
+set(TORCH_VERSION "2.5.1")

if (NOT DEFINED TORCH_PATH)
if (DEFINED ENV{TORCH_PATH})
@@ -19,7 +19,7 @@ if (DEFINED ENV{CUDA} AND NOT "$ENV{CUDA}" STREQUAL "")
message(STATUS "CUDA VERSION: $ENV{CUDA} | ${CUDA_VERSION} | ${CUDA_VERSION_NUMBER}")
set(CAFFE2_USE_CUDNN 1)
- set(ENV{TORCH_CUDA_ARCH_LIST} "3.7 5.0 5.2 6.0 6.1 7.0 7.5 8.0 8.6+PTX")
+ set(ENV{TORCH_CUDA_ARCH_LIST} "5.0 5.2 6.0 6.1 7.0 7.5 8.0 8.6 9.0+PTX")
endif()

@@ -50,11 +50,7 @@ if (NOT EXISTS "${TORCH_PATH}")
endif()

if (APPLE)
- if ('${CMAKE_HOST_SYSTEM_PROCESSOR}' STREQUAL 'x86_64')
- set(TORCH_URL "https://download.pytorch.org/libtorch/cpu/libtorch-macos-${TORCH_VERSION}.zip")
- elseif('${CMAKE_HOST_SYSTEM_PROCESSOR}' STREQUAL 'arm64')
- set(TORCH_URL "https://github.com/mlverse/libtorch-mac-m1/releases/download/LibTorch-for-R/libtorch-v${TORCH_VERSION}.zip")
- endif()
+ set(TORCH_URL "https://github.com/mlverse/libtorch-mac-m1/releases/download/LibTorch-for-R/libtorch-${CMAKE_HOST_SYSTEM_PROCESSOR}-v${TORCH_VERSION}.zip")
elseif(WIN32)
if (DEFINED CUDA_VERSION_NUMBER)
set(TORCH_URL "https://download.pytorch.org/libtorch/cu${CUDA_VERSION_NUMBER}/libtorch-win-shared-with-deps-${TORCH_VERSION}%2Bcu${CUDA_VERSION_NUMBER}.zip")
@@ -82,6 +78,21 @@ endif()

if (DEFINED CUDA_VERSION_NUMBER)
if (WIN32)
find_package(CUDAToolkit)
+    # Work around an issue with the PyTorch NVTX headers
+    # See https://discuss.pytorch.org/t/failed-to-find-nvtoolsext/179635/13?u=dfalbel
+    if(CUDA_VERSION_NUMBER VERSION_GREATER_EQUAL 12)
+      message(STATUS "PyTorch NVTX headers workaround: Yes")
+      # only do this if nvToolsExt is not defined and CUDA::nvtx3 exists
+      if(NOT TARGET CUDA::nvToolsExt AND TARGET CUDA::nvtx3)
+        add_library(CUDA::nvToolsExt INTERFACE IMPORTED)
+        # ensure that PyTorch is told to use NVTX3 headers
+        target_compile_definitions(
+          CUDA::nvToolsExt INTERFACE
+          TORCH_CUDA_USE_NVTX3
+        )
+        target_link_libraries(CUDA::nvToolsExt INTERFACE CUDA::nvtx3)
+      endif()
+    endif()
endif()
enable_language(CUDA)
add_compile_definitions("CUDA${CUDA_VERSION_NUMBER}")
@@ -131,7 +142,7 @@ set(LANTERN_SRC
src/Compile.cpp
src/Amp.cpp
src/JITExecute.cpp
- src/Pickler.cpp
+ src/Unpickler.cpp
)

if(APPLE)
@@ -158,7 +169,7 @@ if(DEFINED ENV{CUDA} AND NOT '$ENV{CUDA}' STREQUAL '')
set_property(TARGET lantern PROPERTY CUDA_SEPARABLE_COMPILATION ON)
set_property(TARGET lantern PROPERTY CUDA_STANDARD 17)
else()
- set_property(TARGET lantern PROPERTY CUDA_STANDARD 14)
+ set_property(TARGET lantern PROPERTY CUDA_STANDARD 17)
endif()

else()
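The declarations.yaml churn that follows is almost entirely mechanical: the regenerated schema dumps print ::std::optional and ::std::nullopt where they used to print c10::optional and c10::nullopt. My gloss, not stated in the diff, is that this reflects upstream PyTorch's migration of c10::optional to the standard type, so the two spellings name the same thing at call sites:

#include <iostream>
#include <optional>

// A nullable argument, spelled the way the regenerated signatures print it.
void backward_like(const std::optional<bool>& retain_graph) {
  std::cout << (retain_graph.has_value() ? "explicit" : "defaulted") << "\n";
}

int main() {
  backward_like(std::nullopt);  // formerly rendered as c10::nullopt in the YAML
  backward_like(true);          // converts implicitly into the optional
}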
dynamic_type: at::Tensor @@ -1030,7 +1030,7 @@ dynamic_type: at::DimnameList is_nullable: true name: names - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -1063,8 +1063,8 @@ dynamic_type: at::DimnameList is_nullable: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -1075,7 +1075,7 @@ dynamic_type: at::DimnameList is_nullable: true name: names - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -1304,12 +1304,206 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _assert_async + operator_name: _assert_async + overload_name: msg + manual_kernel_registration: false + category_override: '' + schema_string: aten::_assert_async.msg(Tensor self, str assert_msg) -> () + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + schema_order_cpp_signature: void (const at::Tensor &, c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _assert_scalar + operator_name: _assert_scalar + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_assert_scalar(Scalar self, str assert_msg) -> () + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + schema_order_cpp_signature: void (const at::Scalar &, c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _functional_assert_scalar + operator_name: _functional_assert_scalar + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_functional_assert_scalar(Scalar self, str assert_msg, Tensor dep_token) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Scalar &, c10::string_view, const at::Tensor &) + schema_order_arguments: + 
- annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _functional_assert_async + operator_name: _functional_assert_async + overload_name: msg + manual_kernel_registration: false + category_override: '' + schema_string: aten::_functional_assert_async.msg(Tensor self, str assert_msg, Tensor dep_token) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _assert_tensor_metadata operator_name: _assert_tensor_metadata overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? dtype=None) -> () + schema_string: aten::_assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? 
dtype=None) -> () arguments: - annotation: null dynamic_type: at::Tensor @@ -1317,24 +1511,24 @@ name: a type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: size type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: stride type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: void (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: void (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -1342,23 +1536,23 @@ name: a type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: size type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: stride type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -1372,6 +1566,362 @@ with_gil: false deprecated: false has_math_kernel: true +- name: _print + operator_name: _print + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_print(str s) -> () + arguments: + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: s + type: c10::string_view + schema_order_cpp_signature: void (c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: s + type: c10::string_view + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sym_constrain_range + operator_name: sym_constrain_range + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sym_constrain_range(Scalar size, *, int? min=None, int? 
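_assert_tensor_metadata's schema moves size and stride from int[]? to SymInt[]?, yet the argument records above keep at::OptionalIntArrayRef, so concrete-shape C++ call sites compile unchanged. A minimal sketch (check_2x3_float is an illustrative helper):

    #include <ATen/ATen.h>

    void check_2x3_float(const at::Tensor& a) {
      at::_assert_tensor_metadata(a,
                                  at::IntArrayRef{2, 3},  // size
                                  ::std::nullopt,         // stride: unchecked
                                  at::kFloat);            // dtype
    }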
max=None) -> () + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: min + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: max + type: ::std::optional + schema_order_cpp_signature: void (const at::Scalar &, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: min + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: max + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sym_constrain_range_for_size + operator_name: sym_constrain_range_for_size + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sym_constrain_range_for_size(Scalar size, *, int? min=None, int? max=None) -> () + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: min + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: max + type: ::std::optional + schema_order_cpp_signature: void (const at::Scalar &, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: min + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: max + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _functional_sym_constrain_range + operator_name: _functional_sym_constrain_range + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_functional_sym_constrain_range(Scalar size, int? min, int? 
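sym_constrain_range (and sym_constrain_range_for_size below it) takes the size as a Scalar plus keyword-only optional bounds; it exists so a symbolic size can be given a known range under dynamic shapes. Per the generated signature above, a sketch (constrain is an illustrative helper):

    #include <ATen/ATen.h>

    // void at::sym_constrain_range(const at::Scalar& size,
    //                              ::std::optional<int64_t> min = ::std::nullopt,
    //                              ::std::optional<int64_t> max = ::std::nullopt);
    void constrain(int64_t n) {
      at::sym_constrain_range(n, /*min=*/2, /*max=*/::std::nullopt);
    }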
max, Tensor dep_token) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: min + type: ::std::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: max + type: ::std::optional + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Scalar &, ::std::optional, ::std::optional, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: min + type: ::std::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: max + type: ::std::optional + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _functional_sym_constrain_range_for_size + operator_name: _functional_sym_constrain_range_for_size + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_functional_sym_constrain_range_for_size(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: min + type: ::std::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: max + type: ::std::optional + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Scalar &, ::std::optional, ::std::optional, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: min + type: ::std::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: max + type: ::std::optional + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _make_dep_token + operator_name: _make_dep_token + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: ::std::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: ::std::optional + schema_order_cpp_signature: at::Tensor (::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: refine_names operator_name: refine_names overload_name: '' @@ -1929,7 +2479,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight_buf - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -1939,7 +2489,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -1989,8 +2539,8 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const c10::optional &, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const ::std::optional &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -2011,7 +2561,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight_buf - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -2021,7 +2571,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -2071,7 +2621,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -2136,7 +2686,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: 
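_make_dep_token above is flagged is_factory_method: true, so alongside the schema-order form with five expanded optionals it gets the usual at::TensorOptions convenience overload. A sketch (make_tokens is an illustrative helper):

    #include <ATen/ATen.h>

    void make_tokens() {
      // Equivalent calls; the options bundle expands to the five optionals
      // listed in schema_order_arguments above.
      at::Tensor d1 = at::_make_dep_token();
      at::Tensor d2 = at::_make_dep_token(at::TensorOptions().device(at::kCPU));
    }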
false @@ -2146,17 +2696,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -2206,7 +2756,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -2217,7 +2767,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, const at::Tensor &, ::std::array) + schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &, const at::Tensor &, ::std::array) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -2248,7 +2798,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -2258,17 +2808,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -2318,7 +2868,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -2382,7 +2932,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (double, bool, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (double, bool, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: double @@ -2400,33 +2950,33 @@ name: dropout_seed type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: 
false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -2496,12 +3046,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -2514,11 +3064,11 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -2614,8 +3164,8 @@ dynamic_type: bool is_nullable: true name: train - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -2631,7 +3181,7 @@ dynamic_type: bool is_nullable: true name: train - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -2742,8 +3292,8 @@ dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, const at::Tensor &, int64_t, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, const at::Tensor &, int64_t, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -2774,7 +3324,7 @@ dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -4018,13 +4568,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -4032,12 +4582,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -6045,7 +6595,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor + schema_string: aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -6100,7 +6650,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor + schema_string: aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -6257,6 +6807,51 @@ with_gil: false deprecated: false 
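affine_grid_generator and its backward, like _assert_tensor_metadata earlier, change only their schema strings from int[] to SymInt[]; the concrete entry point keeps at::IntArrayRef, and upstream codegen normally also emits an at::affine_grid_generator_symint companion taking c10::SymIntArrayRef (not recorded in this YAML). Sketch (make_grid is an illustrative helper):

    #include <ATen/ATen.h>

    at::Tensor make_grid(const at::Tensor& theta) {
      // theta holds (N, 2, 3) affine matrices; size is the target (N, C, H, W).
      return at::affine_grid_generator(theta, {1, 3, 32, 32},
                                       /*align_corners=*/false);
    }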
has_math_kernel: true +- name: _test_functorch_fallback + operator_name: _test_functorch_fallback + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_test_functorch_fallback(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: all operator_name: all overload_name: dim @@ -6315,6 +6910,66 @@ with_gil: false deprecated: false has_math_kernel: false +- name: all + operator_name: all + overload_name: dims + manual_kernel_registration: false + category_override: '' + schema_string: aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + type: at::OptionalIntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + type: at::OptionalIntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: all_out operator_name: all overload_name: out @@ -6386,6 +7041,79 @@ with_gil: false deprecated: false has_math_kernel: false +- name: all_out + operator_name: all + overload_name: dims_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
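The new all.dims overload above widens dim from a single int to int[]? (at::OptionalIntArrayRef in C++), so the conjunction can run over several dimensions at once, or over all of them when dim is absent. Sketch (reduce_all is an illustrative helper):

    #include <ATen/ATen.h>

    void reduce_all() {
      at::Tensor x  = at::rand({2, 3, 4}).gt(0.1);
      at::Tensor r1 = at::all(x, at::IntArrayRef{0, 2});  // dims 0 and 2
      at::Tensor r2 = at::all(x, ::std::nullopt,
                              /*keepdim=*/false);         // every dimension
    }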
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + type: at::OptionalIntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + type: at::OptionalIntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: all operator_name: all overload_name: dimname @@ -6655,37 +7383,31 @@ with_gil: false deprecated: false has_math_kernel: false -- name: any_out +- name: any operator_name: any - overload_name: out + overload_name: dims manual_kernel_registration: false category_override: '' - schema_string: aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - annotation: null - dynamic_type: int64_t - is_nullable: false + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true name: dim - type: int64_t + type: at::OptionalIntArrayRef - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -6693,32 +7415,27 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: int64_t - is_nullable: false + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true name: dim - type: int64_t + type: at::OptionalIntArrayRef - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - - allocate: true - annotation: a! 
- dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & method_of: - Type + - Tensor - namespace mode: native python_module: '' returns: - dynamic_type: at::Tensor - name: out - type: at::Tensor & + name: result + type: at::Tensor inplace: false is_factory_method: false abstract: true @@ -6726,30 +7443,37 @@ with_gil: false deprecated: false has_math_kernel: false -- name: any +- name: any_out operator_name: any - overload_name: dimname + overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor + schema_string: aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - annotation: null - dynamic_type: at::Dimname + dynamic_type: int64_t is_nullable: false name: dim - type: at::Dimname + type: int64_t - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, bool) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -6757,39 +7481,45 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: at::Dimname + dynamic_type: int64_t is_nullable: false name: dim - type: at::Dimname + type: int64_t - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & method_of: - Type - - Tensor - namespace mode: native python_module: '' returns: - dynamic_type: at::Tensor - name: result - type: at::Tensor + name: out + type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: any_out operator_name: any - overload_name: dimname_out + overload_name: dims_out manual_kernel_registration: false category_override: '' - schema_string: aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -6804,17 +7534,18 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: at::Dimname - is_nullable: false + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true name: dim - type: at::Dimname + type: at::OptionalIntArrayRef - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -6822,10 +7553,140 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: at::Dimname - is_nullable: false + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true name: dim - type: at::Dimname + type: at::OptionalIntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: any + operator_name: any + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: any_out + operator_name: any + overload_name: dimname_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
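Most of the churn in the any hunks is reordering: the existing out and dimname overloads are rewritten in place so that the new any.dims and any.dims_out records can sit next to their all counterparts; only the dims variants are genuinely new. The out form writes into a preallocated tensor. Sketch (any_into is an illustrative helper):

    #include <ATen/ATen.h>

    void any_into() {
      at::Tensor x   = at::rand({2, 3}).gt(0.9);
      at::Tensor out = at::empty({}, at::kBool);
      // any.dims_out: dim = ::std::nullopt reduces over every dimension.
      at::any_out(out, x, ::std::nullopt, /*keepdim=*/false);
    }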
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname - annotation: null default: false dynamic_type: bool @@ -6874,7 +7735,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -6882,33 +7743,33 @@ name: end type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -6949,7 +7810,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -6962,33 +7823,33 @@ name: end type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -7035,7 +7896,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, const at::Scalar &, c10::optional, c10::optional, 
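The arange hunks show the factory-function pattern in full: the public wrapper keeps the at::TensorOptions bundle, while schema_order_cpp_signature expands it into four optionals (dtype, layout, device, pin_memory), each now defaulting to ::std::nullopt. Sketch (ranges is an illustrative helper):

    #include <ATen/ATen.h>

    void ranges() {
      // Equivalent: the options bundle vs. the expanded schema-order form.
      at::Tensor a = at::arange(10, at::TensorOptions().dtype(at::kLong));
      at::Tensor b = at::arange(10,
                                ::std::optional<at::ScalarType>(at::kLong),
                                ::std::nullopt,    // layout
                                ::std::nullopt,    // device
                                ::std::nullopt);   // pin_memory
    }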
c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -7054,33 +7915,33 @@ name: step type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -7275,18 +8136,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -7294,11 +8155,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -7342,18 +8203,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -7361,11 +8222,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -7408,18 +8269,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -7427,11 +8288,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true 
name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -7475,18 +8336,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -7494,11 +8355,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -8277,12 +9138,12 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -8300,11 +9161,11 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -8345,12 +9206,12 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: a! 
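argmax and argmin, whose hunks appear above, use a plain ::std::optional<int64_t> for dim rather than an array ref; when the argument is absent the input is treated as flattened. Sketch (indices is an illustrative helper):

    #include <ATen/ATen.h>

    void indices() {
      at::Tensor x      = at::rand({4, 5});
      at::Tensor flat_i = at::argmax(x);                 // dim = ::std::nullopt
      at::Tensor col_i  = at::argmax(x, /*dim=*/0,
                                     /*keepdim=*/true);  // per-column indices
    }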
dynamic_type: at::Tensor @@ -8368,11 +9229,11 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -9367,7 +10228,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -9375,33 +10236,33 @@ name: window_length type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -9442,7 +10303,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, bool, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -9455,33 +10316,33 @@ name: periodic type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -9514,22 +10375,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -9550,7 +10411,7 @@ is_nullable: false name: cudnn_enabled type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const 
c10::optional &, bool, double, double, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -9561,22 +10422,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -9629,12 +10490,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -9660,7 +10521,7 @@ is_nullable: false name: output_zero_point type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, double, double, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double, double, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -9671,12 +10532,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -9734,22 +10595,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -9770,7 +10631,7 @@ is_nullable: false name: cudnn_enabled type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -9781,22 +10642,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: 
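For nullable Tensor parameters such as batch_norm's weight, bias, and running statistics above, the C++ type becomes const ::std::optional<at::Tensor>&, with ::std::nullopt (or the '{}' default) meaning absent. Sketch (normalize is an illustrative helper):

    #include <ATen/ATen.h>

    at::Tensor normalize(const at::Tensor& x,
                         const at::Tensor& mean, const at::Tensor& var) {
      return at::batch_norm(x,
                            ::std::nullopt,  // weight: no affine scale
                            ::std::nullopt,  // bias
                            mean, var,
                            /*training=*/false, /*momentum=*/0.1,
                            /*eps=*/1e-5, /*cudnn_enabled=*/true);
    }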
at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -9871,27 +10732,27 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var_transform - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -9912,7 +10773,7 @@ is_nullable: false name: reservedSpace type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, ::std::array, const at::Tensor &) + schema_order_cpp_signature: ::std::tuple (int64_t, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, ::std::array, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -9933,27 +10794,27 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var_transform - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -10009,13 +10870,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10023,12 +10884,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -10066,13 +10927,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - 
schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10080,12 +10941,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -10127,13 +10988,13 @@ name: p type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -10146,12 +11007,12 @@ name: p type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -10187,13 +11048,13 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: a! 
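The bernoulli family above shows the last recurring shape of this migration: a keyword-only ::std::optional<at::Generator> defaulting to ::std::nullopt. Sketch (sample is an illustrative helper):

    #include <ATen/ATen.h>

    void sample() {
      at::Tensor p = at::full({3, 3}, 0.25);
      at::Tensor s = at::bernoulli(p);  // generator left at ::std::nullopt
      p.bernoulli_(0.5);                // in-place double-p overload
    }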
dynamic_type: at::Tensor @@ -10207,12 +11068,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -10247,13 +11108,13 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10266,12 +11127,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -10316,8 +11177,8 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10339,7 +11200,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -10378,14 +11239,14 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10402,7 +11263,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -10454,14 +11315,14 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10478,7 +11339,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -10535,14 +11396,14 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor 
(const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10564,7 +11425,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -10621,14 +11482,14 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10650,7 +11511,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -10702,20 +11563,20 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: pos_weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10732,13 +11593,13 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: pos_weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -10778,14 +11639,14 @@ dynamic_type: at::Tensor is_nullable: true name: weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t is_nullable: false name: minlength type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10797,7 +11658,7 @@ dynamic_type: at::Tensor is_nullable: true name: weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ -11241,12 +12102,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: logical_not - operator_name: logical_not +- name: _lazy_clone + operator_name: _lazy_clone overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::logical_not(Tensor self) -> Tensor + schema_string: aten::_lazy_clone(Tensor self) -> 
Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -11277,119 +12138,25 @@ with_gil: false deprecated: false has_math_kernel: false -- name: logical_not_ - operator_name: logical_not_ - overload_name: '' - manual_kernel_registration: false - category_override: '' - schema_string: aten::logical_not_(Tensor(a!) self) -> Tensor(a!) - arguments: - - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: self - type: at::Tensor & - schema_order_cpp_signature: at::Tensor & (at::Tensor &) - schema_order_arguments: - - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: self - type: at::Tensor & - method_of: - - Type - - Tensor - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: self - type: at::Tensor & - inplace: true - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: logical_not_out +- name: logical_not operator_name: logical_not - overload_name: out - manual_kernel_registration: false - category_override: '' - schema_string: aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: logical_xor - operator_name: logical_xor overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::logical_xor(Tensor self, Tensor other) -> Tensor + schema_string: aten::logical_not(Tensor self) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: other - type: const at::Tensor & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_cpp_signature: at::Tensor (const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: other - type: const at::Tensor & method_of: - Type - Tensor @@ -11407,35 +12174,25 @@ with_gil: false deprecated: false has_math_kernel: false -- name: logical_xor_ - operator_name: logical_xor_ +- name: logical_not_ + operator_name: logical_not_ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!) + schema_string: aten::logical_not_(Tensor(a!) self) -> Tensor(a!) arguments: - annotation: a! 
dynamic_type: at::Tensor is_nullable: false name: self type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: other - type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_cpp_signature: at::Tensor & (at::Tensor &) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor is_nullable: false name: self type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: other - type: const at::Tensor & method_of: - Type - Tensor @@ -11452,12 +12209,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: logical_xor_out - operator_name: logical_xor +- name: logical_not_out + operator_name: logical_not overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -11471,23 +12228,163 @@ is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: other - type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: other - type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_xor + operator_name: logical_xor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_xor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_xor_ + operator_name: logical_xor_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_xor_out + operator_name: logical_xor + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & - allocate: true annotation: a! dynamic_type: at::Tensor @@ -11830,7 +12727,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -11838,33 +12735,33 @@ name: window_length type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -11905,7 +12802,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, bool, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -11918,33 +12815,33 @@ name: periodic type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType 
is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -13371,18 +14268,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -13390,17 +14287,17 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -13435,14 +14332,14 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -13454,13 +14351,13 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -13491,18 +14388,18 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -13510,17 +14407,17 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -13555,14 +14452,14 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -13574,13 +14471,13 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -13618,18 +14515,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -13637,17 +14534,17 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -13695,14 +14592,14 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -13714,13 +14611,13 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! dynamic_type: at::Tensor @@ -14361,18 +15258,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -14380,17 +15277,17 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -14425,14 +15322,14 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -14444,13 +15341,13 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -14481,18 +15378,18 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const 
c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -14500,17 +15397,17 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -14545,14 +15442,14 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -14564,13 +15461,13 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -14608,18 +15505,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -14627,17 +15524,17 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -14685,14 +15582,14 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -14704,13 +15601,13 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! dynamic_type: at::Tensor @@ -15047,7 +15944,7 @@ name: self type: const at::Tensor & - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: false kwarg_only: true @@ -15061,7 +15958,7 @@ name: self type: const at::Tensor & - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: false kwarg_only: true @@ -15088,7 +15985,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor + schema_string: aten::convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -15104,7 +16001,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15135,7 +16032,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -15151,7 +16048,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15203,7 +16100,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + schema_string: aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? 
bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -15344,7 +16241,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor + schema_string: aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -15360,7 +16257,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15391,7 +16288,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -15407,7 +16304,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15459,7 +16356,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + schema_string: aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) arguments: - annotation: null dynamic_type: at::Tensor @@ -15593,7 +16490,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor + schema_string: aten::_convolution(Tensor input, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -15609,7 +16506,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15660,7 +16557,7 @@ is_nullable: false name: allow_tf32 type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, bool, bool, bool, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, bool, bool, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -15676,7 +16573,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15748,7 +16645,7 @@ overload_name: deprecated manual_kernel_registration: false category_override: '' - schema_string: aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor + schema_string: aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, int[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -15764,7 +16661,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15810,7 +16707,7 @@ is_nullable: false name: cudnn_enabled type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, bool, bool, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, bool, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -15826,7 +16723,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15893,7 +16790,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor + schema_string: aten::_convolution_mode(Tensor input, Tensor weight, Tensor? 
bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -15909,7 +16806,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15930,7 +16827,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -15946,7 +16843,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15988,23 +16885,23 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + schema_string: aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor is_nullable: true name: ggI - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: ggW - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: ggb - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -16055,23 +16952,23 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple (const c10::optional &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, ::std::array) + schema_order_cpp_signature: ::std::tuple (const ::std::optional &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, ::std::array) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: true name: ggI - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: ggW - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: ggb - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -16149,7 +17046,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::conv1d(Tensor input, Tensor weight, Tensor? 
bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor + schema_string: aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -16166,7 +17063,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16194,7 +17091,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -16211,7 +17108,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16260,7 +17157,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor + schema_string: aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, SymInt groups=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -16277,7 +17174,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16305,7 +17202,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -16322,7 +17219,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16371,7 +17268,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor + schema_string: aten::conv3d(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, SymInt groups=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -16388,7 +17285,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16416,7 +17313,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -16433,7 +17330,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16482,7 +17379,7 @@ overload_name: padding manual_kernel_registration: false category_override: '' - schema_string: aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor + schema_string: aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, str padding="valid", SymInt[1] dilation=1, SymInt groups=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -16499,7 +17396,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16526,7 +17423,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -16543,7 +17440,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16591,7 +17488,7 @@ overload_name: padding manual_kernel_registration: false category_override: '' - schema_string: aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor + schema_string: aten::conv2d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[2] stride=1, str padding="valid", SymInt[2] dilation=1, SymInt groups=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -16608,7 +17505,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16635,7 +17532,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -16652,7 +17549,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16700,7 +17597,7 @@ overload_name: padding manual_kernel_registration: false category_override: '' - schema_string: aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor + schema_string: aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, str padding="valid", SymInt[3] dilation=1, SymInt groups=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -16717,7 +17614,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16744,7 +17641,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -16761,7 +17658,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16957,7 +17854,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor + schema_string: aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, SymInt groups=1, SymInt[1] dilation=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -16974,7 +17871,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -17009,7 +17906,7 @@ name: dilation size: 1 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -17026,7 +17923,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -17082,7 +17979,7 @@ overload_name: input manual_kernel_registration: false category_override: '' - schema_string: aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor + schema_string: aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -17099,7 +17996,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -17134,7 +18031,7 @@ name: dilation size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -17151,7 +18048,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -17207,7 +18104,7 @@ overload_name: input manual_kernel_registration: false category_override: '' - schema_string: aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor + schema_string: aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -17224,7 +18121,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -17259,7 +18156,7 @@ name: dilation size: 3 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -17276,7 +18173,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -17923,12 +18820,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -17936,11 +18833,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -17983,15 +18880,15 @@ is_nullable: true kwarg_only: true name: fweights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: aweights - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -18011,14 +18908,14 @@ is_nullable: true kwarg_only: true name: fweights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: aweights - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -18245,17 +19142,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -18271,7 +19168,7 @@ is_nullable: false name: epsilon type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, 
const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -18287,17 +19184,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -18364,22 +19261,22 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -18390,7 +19287,7 @@ is_nullable: false name: reserveSpace type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, double, const at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, double, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -18411,22 +19308,22 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -18464,7 +19361,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor + schema_string: aten::cudnn_convolution(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -18574,13 +19471,20 @@ with_gil: false deprecated: false has_math_kernel: false -- name: cudnn_convolution_transpose - operator_name: cudnn_convolution_transpose - overload_name: '' +- name: cudnn_convolution_out + operator_name: cudnn_convolution + overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool 
benchmark, bool deterministic, bool allow_tf32) -> Tensor + schema_string: aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -18596,11 +19500,6 @@ is_nullable: false name: padding type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: output_padding - type: at::IntArrayRef - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -18631,7 +19530,7 @@ is_nullable: false name: allow_tf32 type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -18648,11 +19547,6 @@ is_nullable: false name: padding type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: output_padding - type: at::IntArrayRef - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -18683,6 +19577,13 @@ is_nullable: false name: allow_tf32 type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & method_of: - Type - namespace @@ -18690,8 +19591,8 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: result - type: at::Tensor + name: out + type: at::Tensor & inplace: false is_factory_method: false abstract: true @@ -18699,12 +19600,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _mps_convolution_transpose - operator_name: _mps_convolution_transpose +- name: cudnn_convolution_transpose + operator_name: cudnn_convolution_transpose overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_mps_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor + schema_string: aten::cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -18741,76 +19642,201 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) - schema_order_arguments: - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: weight - type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: padding - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: output_padding - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: bool is_nullable: false - name: stride - 
type: at::IntArrayRef + name: benchmark + type: bool - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: bool is_nullable: false - name: dilation - type: at::IntArrayRef + name: deterministic + type: bool - annotation: null - dynamic_type: int64_t + dynamic_type: bool is_nullable: false - name: groups - type: int64_t - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: result - type: at::Tensor - inplace: false - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: mps_convolution_transpose_backward - operator_name: mps_convolution_transpose_backward - overload_name: '' - manual_kernel_registration: false - category_override: '' - schema_string: aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor) - arguments: + name: allow_tf32 + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool) + schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: grad_output - type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: allow_tf32 + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _mps_convolution_transpose + operator_name: _mps_convolution_transpose + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_mps_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - 
annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mps_convolution_transpose_backward + operator_name: mps_convolution_transpose_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -18917,7 +19943,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + schema_string: aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -18933,7 +19959,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -18954,7 +19980,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -18970,7 +19996,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -19012,7 +20038,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + schema_string: aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -19033,12 +20059,12 @@ dynamic_type: const at::Scalar & is_nullable: true name: alpha - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -19059,7 +20085,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -19080,12 +20106,12 @@ dynamic_type: const at::Scalar & is_nullable: true name: alpha - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -19967,13 +20993,13 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -19986,12 +21012,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional 
method_of: - Type - Tensor @@ -20027,13 +21053,13 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -20046,12 +21072,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -20093,13 +21119,13 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -20112,12 +21138,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -20159,13 +21185,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -20178,12 +21204,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -20219,13 +21245,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -20238,12 +21264,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -20285,13 +21311,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -20304,12 +21330,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -20416,13 +21442,13 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -20435,12 +21461,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -20476,13 +21502,13 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -20495,12 +21521,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -20542,13 +21568,13 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -20561,12 +21587,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -20608,13 +21634,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -20627,12 +21653,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -20668,13 +21694,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -20687,12 +21713,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -20734,13 +21760,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -20753,12 +21779,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -22040,14 +23066,14 @@ dynamic_type: at::Tensor is_nullable: true name: prepend - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: append - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -22071,13 +23097,13 @@ dynamic_type: at::Tensor is_nullable: true name: prepend - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: append - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -22131,14 +23157,14 @@ dynamic_type: at::Tensor is_nullable: true name: prepend - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: append - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, const c10::optional &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, const ::std::optional &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -22162,13 +23188,13 @@ dynamic_type: at::Tensor is_nullable: true name: prepend - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: append - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -22205,19 +23231,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: spacing - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -22225,7 +23251,7 @@ kwarg_only: true name: edge_order type: int64_t - schema_order_cpp_signature: ::std::vector (const at::Tensor &, const c10::optional &, c10::optional, int64_t) + schema_order_cpp_signature: ::std::vector (const at::Tensor &, const ::std::optional &, ::std::optional, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -22233,19 +23259,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: spacing - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -22422,12 +23448,12 @@ name: spacing type: at::ArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -22435,7 +23461,7 @@ kwarg_only: true name: edge_order type: int64_t - schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::ArrayRef, c10::optional, int64_t) + schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::ArrayRef, ::std::optional, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -22449,12 +23475,12 @@ name: spacing type: at::ArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -22570,12 +23596,12 @@ name: spacing type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -22583,7 +23609,7 @@ kwarg_only: true name: edge_order type: int64_t - schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::TensorList, c10::optional, int64_t) + schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::TensorList, ::std::optional, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -22597,12 +23623,12 @@ name: spacing type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -22871,8 +23897,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor 
(const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -22889,7 +23915,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -22929,8 +23955,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -22947,7 +23973,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -22993,8 +24019,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -23011,7 +24037,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -23148,8 +24174,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -23166,7 +24192,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -23206,8 +24232,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -23224,7 +24250,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -23504,8 +24530,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -23522,7 +24548,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -23562,8 +24588,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -23580,7 +24606,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -23626,8 +24652,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -23644,7 +24670,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -23690,8 +24716,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -23708,7 +24734,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -23748,8 +24774,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -23766,7 +24792,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -24252,7 +25278,7 @@ name: tensors type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true kwarg_only: true @@ -24271,7 +25297,7 @@ name: tensors type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true kwarg_only: true @@ -24719,7 +25745,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -24732,7 +25758,7 @@ is_nullable: false name: padding_idx type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool, int64_t) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const ::std::optional &, bool, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -24772,7 +25798,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -24997,14 +26023,14 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool is_nullable: false name: include_last_offset type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool) + 
schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -25044,7 +26070,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -25117,7 +26143,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -25127,8 +26153,8 @@ dynamic_type: int64_t is_nullable: true name: padding_idx - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const ::std::optional &, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -25164,7 +26190,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -25174,7 +26200,7 @@ dynamic_type: int64_t is_nullable: true name: padding_idx - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -25245,7 +26271,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -25258,7 +26284,7 @@ is_nullable: false name: padding_idx type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool, int64_t) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const ::std::optional &, bool, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -25298,7 +26324,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -25397,14 +26423,14 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: -1 dynamic_type: int64_t is_nullable: false name: padding_idx type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, bool, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, bool, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -25460,7 +26486,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: -1 dynamic_type: int64_t @@ -25478,11 +26504,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true 
with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: _embedding_bag_sparse_backward operator_name: _embedding_bag_sparse_backward overload_name: '' @@ -25534,14 +26560,14 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: -1 dynamic_type: int64_t is_nullable: false name: padding_idx type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -25587,7 +26613,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: -1 dynamic_type: int64_t @@ -25661,14 +26687,14 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: -1 dynamic_type: int64_t is_nullable: false name: padding_idx type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -25714,7 +26740,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: -1 dynamic_type: int64_t @@ -25851,7 +26877,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -25860,13 +26886,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -25878,42 +26904,42 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: 
::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -25950,13 +26976,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -25964,40 +26990,120 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: empty_permuted + operator_name: empty_permuted + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: physical_layout + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: physical_layout + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional method_of: - Type - namespace @@ -26038,7 +27144,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -26051,33 +27157,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -26123,7 +27229,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -26141,33 +27247,33 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout 
is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -26213,7 +27319,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -26231,33 +27337,33 @@ name: fill_value type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -26298,7 +27404,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -26311,33 +27417,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -26378,7 +27484,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -26391,33 +27497,33 @@ name: 
size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -26439,7 +27545,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor + schema_string: aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -26468,13 +27574,13 @@ name: zero_point type: int64_t - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, double, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, double, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -26482,33 +27588,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: double @@ -26524,12 +27630,12 @@ name: zero_point type: int64_t - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -26551,7 +27657,7 @@ overload_name: '' manual_kernel_registration: false category_override: factory - schema_string: aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? 
layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor + schema_string: aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -26584,13 +27690,13 @@ name: options type: at::TensorOptions - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Tensor &, const at::Tensor &, int64_t, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Tensor &, const at::Tensor &, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -26616,40 +27722,40 @@ name: axis type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -26684,13 +27790,13 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -26703,12 +27809,12 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -26730,7 +27836,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_resize_output_(Tensor(a!) self, int[] size, Device device) -> Tensor(a!) + schema_string: aten::_resize_output_(Tensor(a!) self, SymInt[] size, Device device) -> Tensor(a!) arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -26805,13 +27911,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -26824,40 +27930,40 @@ name: qtensor type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -26894,13 +28000,13 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -26908,12 +28014,12 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -26957,13 +28063,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -26971,40 +28077,40 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -27045,7 +28151,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -27058,33 +28164,33 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -27815,7 +28921,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -27829,7 +28935,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -27837,33 +28943,33 @@ name: n type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -27885,7 +28991,7 @@ overload_name: m manual_kernel_registration: false category_override: '' - schema_string: aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -27904,7 +29010,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -27917,33 +29023,33 @@ name: m type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -27965,7 +29071,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -28014,7 +29120,7 @@ overload_name: m_out manual_kernel_registration: false category_override: '' - schema_string: aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
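The eye hunks above change only the schema strings, int and int[] becoming SymInt and SymInt[], while schema_order_cpp_signature keeps plain int64_t: symbolic sizes are a tracing and shape-inference concept and do not alter the eager C++ entry points recorded here. A toy model of the SymInt idea, a size that is either a concrete integer or a symbol resolved later; this sketches the concept only, not c10::SymInt itself:

    // Concept sketch: a size that is either concrete or symbolic.
    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <variant>

    struct Sym { std::string name; };                 // symbolic size, e.g. "s0"
    using SymInt = std::variant<std::int64_t, Sym>;   // concrete or symbolic

    std::string show(const SymInt& s) {
      if (auto* v = std::get_if<std::int64_t>(&s)) return std::to_string(*v);
      return std::get<Sym>(s).name;
    }

    int main() {
      SymInt concrete = std::int64_t{3};
      SymInt symbolic = Sym{"s0"};
      std::cout << "eye(" << show(concrete) << ")\n";  // eye(3)
      std::cout << "eye(" << show(symbolic) << ")\n";  // eye(s0): resolved at trace time
    }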
@@ -28321,7 +29427,7 @@ overload_name: int manual_kernel_registration: false category_override: '' - schema_string: aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a) + schema_string: aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a) arguments: - annotation: a dynamic_type: at::Tensor @@ -28377,7 +29483,7 @@ overload_name: Dimname manual_kernel_registration: false category_override: '' - schema_string: aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a) + schema_string: aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a) arguments: - annotation: a dynamic_type: at::Tensor @@ -28932,11 +30038,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: floor_divide_ operator_name: floor_divide_ overload_name: Scalar @@ -28977,11 +30083,11 @@ type: at::Tensor & inplace: true is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: frac operator_name: frac overload_name: '' @@ -29125,7 +30231,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -29133,7 +30239,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -29150,35 +30256,35 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -29219,7 +30325,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -29232,33 +30338,33 @@ name: fill_value type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - 
default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -29359,13 +30465,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -29378,40 +30484,40 @@ name: fill_value type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -29441,17 +30547,17 @@ name: filename type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: shared - type: c10::optional + type: ::std::optional - annotation: null default: 0 dynamic_type: int64_t is_nullable: true name: size - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -29459,7 +30565,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (c10::string_view, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (c10::string_view, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: c10::string_view @@ -29467,45 +30573,45 @@ name: filename type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: shared - type: c10::optional + type: ::std::optional - annotation: null default: 0 dynamic_type: int64_t is_nullable: true name: size - type: c10::optional + type: ::std::optional - annotation: null - default: 
c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30427,7 +31533,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -30435,33 +31541,33 @@ name: window_length type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30502,7 +31608,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, bool, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -30515,33 +31621,33 @@ name: periodic type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30577,7 +31683,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null 
dynamic_type: int64_t @@ -30585,33 +31691,33 @@ name: window_length type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30652,7 +31758,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, bool, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -30665,33 +31771,33 @@ name: periodic type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30737,7 +31843,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, bool, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, bool, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -30755,33 +31861,33 @@ name: alpha type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30832,7 +31938,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, bool, double, double, c10::optional, c10::optional, c10::optional, c10::optional) + 
schema_order_cpp_signature: at::Tensor (int64_t, bool, double, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -30855,33 +31961,33 @@ name: beta type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30917,7 +32023,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -30925,33 +32031,33 @@ name: window_length type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30992,7 +32098,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, bool, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -31005,33 +32111,33 @@ name: periodic type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -31077,7 +32183,7 @@ kwarg_only: true name: options type: at::TensorOptions 
- schema_order_cpp_signature: at::Tensor (int64_t, bool, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, bool, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -31095,33 +32201,33 @@ name: beta type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -31229,13 +32335,13 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1.0e-05 dynamic_type: double @@ -31248,7 +32354,7 @@ is_nullable: false name: cudnn_enabled type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const c10::optional &, const c10::optional &, double, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const ::std::optional &, const ::std::optional &, double, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -31265,13 +32371,13 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1.0e-05 dynamic_type: double @@ -31316,12 +32422,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -31347,7 +32453,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -31358,12 +32464,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -31442,7 +32548,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null 
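The group_norm and native_group_norm entries in this region move the nullable weight and bias parameters to const ::std::optional& passed by const reference; the payload is at::Tensor, as the Tensor? weight=None schema strings indicate. A standard-library sketch of the convention, with a plain vector standing in for a tensor, where an absent weight or bias simply skips the corresponding affine step:

    // Optional tensors by const reference: absent means "skip this step".
    #include <cstddef>
    #include <iostream>
    #include <optional>
    #include <vector>

    using Tensor = std::vector<double>;  // stand-in for at::Tensor

    double affine(double v, std::size_t i,
                  const std::optional<Tensor>& weight,
                  const std::optional<Tensor>& bias) {
      if (weight) v *= (*weight)[i];
      if (bias)   v += (*bias)[i];
      return v;
    }

    int main() {
      Tensor x{1.0, 2.0};
      std::optional<Tensor> w = Tensor{10.0, 10.0};
      for (std::size_t i = 0; i < x.size(); ++i)
        std::cout << affine(x[i], i, w, std::nullopt) << ' ';  // 10 20
      std::cout << '\n';
    }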
dynamic_type: int64_t is_nullable: false @@ -31468,7 +32574,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, ::std::array) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, ::std::array) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -31494,7 +32600,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -31691,7 +32797,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor + schema_string: aten::_fft_c2r(Tensor self, int[] dim, int normalization, SymInt last_dim_size) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -31756,7 +32862,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, SymInt last_dim_size, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -32061,20 +33167,20 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_cufft_get_plan_cache_size(int device_index) -> int + schema_string: aten::_cufft_get_plan_cache_size(DeviceIndex device_index) -> int arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t - schema_order_cpp_signature: int64_t (int64_t) + type: at::DeviceIndex + schema_order_cpp_signature: int64_t (at::DeviceIndex) schema_order_arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t + type: at::DeviceIndex method_of: - Type - namespace @@ -32096,20 +33202,20 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_cufft_get_plan_cache_max_size(int device_index) -> int + schema_string: aten::_cufft_get_plan_cache_max_size(DeviceIndex device_index) -> int arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t - schema_order_cpp_signature: int64_t (int64_t) + type: at::DeviceIndex + schema_order_cpp_signature: int64_t (at::DeviceIndex) schema_order_arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t + type: at::DeviceIndex method_of: - Type - namespace @@ -32131,25 +33237,25 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> () + schema_string: aten::_cufft_set_plan_cache_max_size(DeviceIndex device_index, int max_size) -> () arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t + type: at::DeviceIndex - annotation: null dynamic_type: int64_t is_nullable: false name: 
max_size type: int64_t - schema_order_cpp_signature: void (int64_t, int64_t) + schema_order_cpp_signature: void (at::DeviceIndex, int64_t) schema_order_arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t + type: at::DeviceIndex - annotation: null dynamic_type: int64_t is_nullable: false @@ -32173,20 +33279,20 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_cufft_clear_plan_cache(int device_index) -> () + schema_string: aten::_cufft_clear_plan_cache(DeviceIndex device_index) -> () arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t - schema_order_cpp_signature: void (int64_t) + type: at::DeviceIndex + schema_order_cpp_signature: void (at::DeviceIndex) schema_order_arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t + type: at::DeviceIndex method_of: - Type - namespace @@ -32213,11 +33319,11 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List> &) + type: const c10::List<::std::optional> & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List<::std::optional> &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -32225,10 +33331,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & method_of: - Type - Tensor @@ -32266,11 +33372,11 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::List> &, at::Tensor &) + type: const c10::List<::std::optional> & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::List<::std::optional> &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -32278,10 +33384,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - allocate: true annotation: a! 
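The _cufft_* plan-cache entries above narrow device_index from int64_t to at::DeviceIndex in both the schema and the C++ signature. The sketch below assumes DeviceIndex is a small signed integer type (int8_t here; the exact width is an assumption of this note), which is why the change is caller-visible: 64-bit device indices now need an explicit narrowing conversion.

    // Caller-side view of the DeviceIndex narrowing; widths are assumed.
    #include <cstdint>
    #include <iostream>

    using DeviceIndex = std::int8_t;  // assumed width, for illustration only

    std::int64_t plan_cache_size(DeviceIndex device_index) {
      // A real implementation would consult the cuFFT plan cache for this device.
      return device_index >= 0 ? 16 : 0;
    }

    int main() {
      std::int64_t requested = 1;                      // what callers used to pass
      auto idx = static_cast<DeviceIndex>(requested);  // explicit narrowing now needed
      std::cout << plan_cache_size(idx) << '\n';
    }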
dynamic_type: at::Tensor @@ -32305,6 +33411,181 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _unsafe_index + operator_name: _unsafe_index + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::_unsafe_index.Tensor(Tensor self, Tensor?[] indices) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List<::std::optional> &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _unsafe_masked_index + operator_name: _unsafe_masked_index + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_unsafe_masked_index(Tensor self, Tensor mask, Tensor?[] indices, Scalar fill) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::List<::std::optional> &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _unsafe_masked_index_put_accumulate + operator_name: _unsafe_masked_index_put_accumulate + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_unsafe_masked_index_put_accumulate(Tensor self, Tensor mask, Tensor?[] indices, Tensor values) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: 
const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: index_copy_out operator_name: index_copy overload_name: out @@ -32659,10 +33940,10 @@ name: self type: at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -32674,7 +33955,7 @@ is_nullable: false name: accumulate type: bool - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::List> &, const at::Tensor &, bool) + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &, bool) schema_order_arguments: - annotation: a! 
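The _unsafe_index, _unsafe_masked_index, and _unsafe_masked_index_put_accumulate entries added in this region are bounds-check-free indexing variants; per the schema, _unsafe_masked_index combines a gather with a mask and a fill scalar. A toy reading of that contract, inferred from the schema string rather than from the kernels (mask-true positions read self[index], mask-false positions produce fill); the clamp exists only to keep the sketch crash-free, since "unsafe" means the caller guarantees in-bounds indices:

    // Toy masked gather with fill, 1-D case only.
    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    std::vector<float> unsafe_masked_index(const std::vector<float>& self,
                                           const std::vector<bool>& mask,
                                           const std::vector<std::int64_t>& idx,
                                           float fill) {
      std::vector<float> out(idx.size(), fill);
      for (std::size_t i = 0; i < idx.size(); ++i)
        if (mask[i])
          out[i] = self[std::clamp<std::int64_t>(idx[i], 0, self.size() - 1)];
      return out;
    }

    int main() {
      std::vector<float> t{10, 20, 30, 40};
      auto out = unsafe_masked_index(t, {true, false, true}, {3, 99, 0}, -1.0f);
      for (float v : out) std::cout << v << ' ';  // 40 -1 10
      std::cout << '\n';
    }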
dynamic_type: at::Tensor @@ -32682,10 +33963,10 @@ name: self type: at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -32727,10 +34008,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -32742,7 +34023,7 @@ is_nullable: false name: accumulate type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List> &, const at::Tensor &, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -32750,10 +34031,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -32782,6 +34063,73 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _unsafe_index_put + operator_name: _unsafe_index_put + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _index_put_impl_ operator_name: _index_put_impl_ overload_name: '' @@ -32795,10 +34143,10 @@ name: self type: at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ 
-32816,7 +34164,7 @@ is_nullable: false name: unsafe type: bool - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::List> &, const at::Tensor &, bool, bool) + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &, bool, bool) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -32824,10 +34172,10 @@ name: self type: at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -32877,22 +34225,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -32913,7 +34261,7 @@ is_nullable: false name: cudnn_enabled type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -32924,22 +34272,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -34536,13 +35884,13 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1.0e-05 dynamic_type: double @@ -34555,7 +35903,7 @@ is_nullable: false name: cudnn_enable type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, const c10::optional &, const c10::optional &, double, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, const ::std::optional &, const ::std::optional &, double, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -34572,13 +35920,13 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1.0e-05 
dynamic_type: double @@ -34628,18 +35976,18 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, const c10::optional &, const c10::optional &, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, const ::std::optional &, const ::std::optional &, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -34655,12 +36003,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -34724,18 +36072,18 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: ::std::array is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, ::std::array) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, ::std::array) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -34766,12 +36114,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: ::std::array is_nullable: false @@ -34799,6 +36147,75 @@ with_gil: false deprecated: false has_math_kernel: false +- name: rms_norm + operator_name: rms_norm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::rms_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, float? 
eps=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: normalized_shape + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, const ::std::optional &, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: normalized_shape + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true - name: nan_to_num operator_name: nan_to_num overload_name: '' @@ -34812,24 +36229,24 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: nan - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: posinf - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: neginf - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -34837,23 +36254,23 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: nan - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: posinf - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: neginf - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -34884,24 +36301,24 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: nan - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: posinf - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: neginf - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, c10::optional, 
c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -34909,23 +36326,23 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: nan - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: posinf - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: neginf - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -34963,24 +36380,24 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: nan - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: posinf - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: neginf - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -34988,23 +36405,23 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: nan - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: posinf - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: neginf - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -35050,8 +36467,8 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -35068,7 +36485,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -35185,8 +36602,8 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -35203,7 +36620,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
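rms_norm enters the generated declarations just above with an optional weight and an optional eps (Tensor? weight=None, float? eps=None). For reference, RMS normalization divides the input by the root mean square of the normalized values and then applies the optional affine weight; the 1e-6 fallback for an absent eps below is a placeholder of this sketch, not the operator's documented default:

    // Reference RMSNorm over a flat vector: y = x / sqrt(mean(x^2) + eps) * w.
    #include <cmath>
    #include <cstddef>
    #include <iostream>
    #include <optional>
    #include <vector>

    std::vector<double> rms_norm(const std::vector<double>& x,
                                 const std::optional<std::vector<double>>& weight,
                                 std::optional<double> eps) {
      double ms = 0.0;
      for (double v : x) ms += v * v;
      ms /= x.size();
      const double denom = std::sqrt(ms + eps.value_or(1e-6));  // placeholder default
      std::vector<double> y(x.size());
      for (std::size_t i = 0; i < x.size(); ++i)
        y[i] = x[i] / denom * (weight ? (*weight)[i] : 1.0);
      return y;
    }

    int main() {
      for (double v : rms_norm({1.0, 2.0, 3.0}, std::nullopt, std::nullopt))
        std::cout << v << ' ';
      std::cout << '\n';
    }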
dynamic_type: at::Tensor @@ -35249,8 +36666,8 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -35267,7 +36684,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -35478,6 +36895,759 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _cslt_compress + operator_name: _cslt_compress + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cslt_compress(Tensor input) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cslt_sparse_mm + operator_name: _cslt_sparse_mm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cslt_sparse_mm(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False, int alg_id=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: compressed_A + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense_B + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: alpha + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: out_dtype + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose_result + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: alg_id + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, ::std::optional, bool, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: compressed_A + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense_B + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: alpha + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: out_dtype + type: ::std::optional + - annotation: null + 
default: false + dynamic_type: bool + is_nullable: false + name: transpose_result + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: alg_id + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cslt_sparse_mm_search + operator_name: _cslt_sparse_mm_search + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cslt_sparse_mm_search(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: compressed_A + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense_B + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: alpha + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: out_dtype + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose_result + type: bool + schema_order_cpp_signature: int64_t (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, ::std::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: compressed_A + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense_B + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: alpha + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: out_dtype + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose_result + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_semi_structured_tile + operator_name: _sparse_semi_structured_tile + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_semi_structured_tile(Tensor input, str algorithm="", bool use_cutlass=True) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + default: '""' + dynamic_type: c10::string_view + is_nullable: false + name: algorithm + type: c10::string_view + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: use_cutlass + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, 
c10::string_view, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + default: '""' + dynamic_type: c10::string_view + is_nullable: false + name: algorithm + type: c10::string_view + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: use_cutlass + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + - dynamic_type: at::Tensor + name: result4 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_semi_structured_apply + operator_name: _sparse_semi_structured_apply + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_semi_structured_apply(Tensor input, Tensor thread_masks) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: thread_masks + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: thread_masks + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_semi_structured_apply_dense + operator_name: _sparse_semi_structured_apply_dense + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_semi_structured_apply_dense(Tensor input, Tensor thread_masks) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: thread_masks + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: thread_masks + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_semi_structured_linear + operator_name: _sparse_semi_structured_linear + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_semi_structured_linear(Tensor input, Tensor 
weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: meta + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: bias + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: activation + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: out_dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: meta + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: bias + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: activation + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: out_dtype + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_semi_structured_mm + operator_name: _sparse_semi_structured_mm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_semi_structured_mm(Tensor mat1, Tensor mat1_meta, Tensor mat2, *, ScalarType? 
out_dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1_meta + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: out_dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1_meta + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: out_dtype + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_semi_structured_addmm + operator_name: _sparse_semi_structured_addmm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_semi_structured_addmm(Tensor input, Tensor mat1, Tensor mat1_meta, Tensor mat2, *, Scalar alpha=1, Scalar beta=1, ScalarType? 
out_dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1_meta + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: out_dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1_meta + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: out_dtype + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _mixed_dtypes_linear + operator_name: _mixed_dtypes_linear + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_mixed_dtypes_linear(Tensor input, Tensor weight, Tensor scale, *, Tensor? bias=None, str? 
activation=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: bias + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: activation + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: bias + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: activation + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: fbgemm_linear_int8_weight_fp32_activation operator_name: fbgemm_linear_int8_weight_fp32_activation overload_name: '' @@ -35747,6 +37917,166 @@ with_gil: false deprecated: false has_math_kernel: true +- name: _wrapped_linear_prepack + operator_name: _wrapped_linear_prepack + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_wrapped_linear_prepack(Tensor weight, Tensor weight_scale, Tensor weight_zero_point, Tensor bias) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight_scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight_zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight_scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight_zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: 
false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _wrapped_quantized_linear_prepacked + operator_name: _wrapped_quantized_linear_prepacked + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_wrapped_quantized_linear_prepacked(Tensor input, Tensor input_scale, Tensor input_zero_point, Tensor packed_weight, Tensor output_scale, Tensor output_zero_point, int out_channel) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: packed_weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output_scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output_zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: out_channel + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: packed_weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output_scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output_zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: out_channel + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true - name: fbgemm_linear_fp16_weight_fp32_activation operator_name: fbgemm_linear_fp16_weight_fp32_activation overload_name: '' @@ -36127,7 +38457,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -36145,33 +38475,33 @@ name: steps type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true 
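Alongside the optional-type migration, this region of the diff declares the new private cuSPARSELt and sparse semi-structured operators (`_cslt_compress`, `_cslt_sparse_mm`, `_cslt_sparse_mm_search`, `_sparse_semi_structured_*`, `_mixed_dtypes_linear`, and the `_wrapped_*linear*` prepack ops). A hedged sketch of the intended `_cslt_*` call pattern; these are underscore-prefixed internal operators, so availability requires a CUDA build with cuSPARSELt, and the 2:4 sparsity precondition is upstream behaviour rather than something stated in this diff:

```cpp
// Sketch: 2:4 structured-sparse matmul via the new private cuSPARSELt ops.
// A must already satisfy the 2:4 sparsity pattern; CUDA + cuSPARSELt only.
#include <ATen/ATen.h>

at::Tensor sparse24_mm(const at::Tensor& A, const at::Tensor& B) {
  at::Tensor compressed = at::_cslt_compress(A);
  // bias/alpha default to {}, out_dtype to ::std::nullopt (see schema above).
  return at::_cslt_sparse_mm(compressed, B);
}
```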
name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -36188,59 +38518,80 @@ with_gil: false deprecated: false has_math_kernel: false -- name: linspace_out +- name: linspace operator_name: linspace - overload_name: out + overload_name: Tensor_Tensor manual_kernel_registration: false - category_override: '' - schema_string: aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) + category_override: factory + schema_string: aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::Tensor is_nullable: false name: start - type: const at::Scalar & + type: const at::Tensor & - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::Tensor is_nullable: false name: end - type: const at::Scalar & + type: const at::Tensor & - annotation: null dynamic_type: int64_t is_nullable: false name: steps type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Scalar &, int64_t, at::Tensor &) + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::Tensor is_nullable: false name: start - type: const at::Scalar & + type: const at::Tensor & - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::Tensor is_nullable: false name: end - type: const at::Scalar & + type: const at::Tensor & - annotation: null dynamic_type: int64_t is_nullable: false name: steps type: int64_t - - allocate: true - annotation: a! 
- dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional method_of: - Type - namespace @@ -36248,37 +38599,91 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: out - type: at::Tensor & + name: result + type: at::Tensor inplace: false - is_factory_method: false + is_factory_method: true abstract: true device_guard: true with_gil: false deprecated: false has_math_kernel: false -- name: log - operator_name: log - overload_name: '' +- name: linspace + operator_name: linspace + overload_name: Tensor_Scalar manual_kernel_registration: false - category_override: '' - schema_string: aten::log(Tensor self) -> Tensor + category_override: factory + schema_string: aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self + name: start type: const at::Tensor & - schema_order_cpp_signature: at::Tensor (const at::Tensor &) + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self + name: start type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional method_of: - Type - - Tensor - namespace mode: native python_module: '' @@ -36287,54 +38692,108 @@ name: result type: at::Tensor inplace: false - is_factory_method: false + is_factory_method: true abstract: true device_guard: true with_gil: false deprecated: false has_math_kernel: false -- name: log_ - operator_name: log_ - overload_name: '' +- name: linspace + operator_name: linspace + 
overload_name: Scalar_Tensor manual_kernel_registration: false - category_override: '' - schema_string: aten::log_(Tensor(a!) self) -> Tensor(a!) + category_override: factory + schema_string: aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - - annotation: a! + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self - type: at::Tensor & - schema_order_cpp_signature: at::Tensor & (at::Tensor &) + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - - annotation: a! + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self - type: at::Tensor & + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional method_of: - Type - - Tensor - namespace mode: native python_module: '' returns: - dynamic_type: at::Tensor - name: self - type: at::Tensor & - inplace: true - is_factory_method: false + name: result + type: at::Tensor + inplace: false + is_factory_method: true abstract: true device_guard: true with_gil: false deprecated: false has_math_kernel: false -- name: log_out - operator_name: log +- name: linspace_out + operator_name: linspace overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -36344,28 +38803,376 @@ output: true type: at::Tensor & - annotation: null - dynamic_type: at::Tensor + dynamic_type: const at::Scalar & is_nullable: false - name: self - type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) - schema_order_arguments: + name: start + type: const at::Scalar & - annotation: null - dynamic_type: at::Tensor + dynamic_type: const at::Scalar & is_nullable: false - name: self - type: const at::Tensor & - - allocate: true - annotation: a! 
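With the three factory overloads above (`Tensor_Tensor`, `Tensor_Scalar`, `Scalar_Tensor`), `linspace` now accepts tensor-valued endpoints as well as scalars. A small sketch; the requirement that the endpoints be 0-dim is upstream kernel behaviour, not spelled out in the schema strings:

```cpp
// Sketch: linspace with tensor-valued endpoints (linspace.Tensor_Tensor).
#include <ATen/ATen.h>

at::Tensor ramp() {
  at::Tensor start = at::scalar_tensor(0.0);  // 0-dim endpoints
  at::Tensor end   = at::scalar_tensor(1.0);
  // dtype/layout/device/pin_memory all default to ::std::nullopt.
  return at::linspace(start, end, /*steps=*/5);
}
```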
- dynamic_type: at::Tensor + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native + name: steps + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Scalar &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linspace_out + operator_name: linspace + overload_name: Tensor_Tensor_out + manual_kernel_registration: false + category_override: factory + schema_string: aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linspace_out + operator_name: linspace + overload_name: Tensor_Scalar_out + manual_kernel_registration: false + category_override: factory + schema_string: aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linspace_out + operator_name: linspace + overload_name: Scalar_Tensor_out + manual_kernel_registration: false + category_override: factory + schema_string: aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log + operator_name: log + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::log(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_ + operator_name: log_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::log_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_out + operator_name: log + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
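The matching `linspace.*_out` overloads write into a caller-allocated tensor; per the `Tensor(a!)` annotation the out tensor fixes dtype and device, and in the generated C++ API it is passed first. A sketch, assuming tensor endpoints as above:

```cpp
// Sketch: the out variant (linspace.Tensor_Tensor_out).
#include <ATen/ATen.h>

void fill_ramp(at::Tensor& out,
               const at::Tensor& start, const at::Tensor& end) {
  at::linspace_out(out, start, end, /*steps=*/out.numel());
}
```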
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native python_module: '' returns: - dynamic_type: at::Tensor @@ -37392,7 +40199,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, int64_t, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -37416,33 +40223,339 @@ name: base type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logspace + operator_name: logspace + overload_name: Tensor_Tensor + manual_kernel_registration: false + category_override: factory + schema_string: aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logspace + operator_name: logspace + overload_name: Tensor_Scalar + manual_kernel_registration: false + category_override: factory + schema_string: aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logspace + operator_name: logspace + overload_name: Scalar_Tensor + manual_kernel_registration: false + category_override: factory + schema_string: aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &, int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional method_of: - Type - namespace @@ -37540,6 +40653,249 @@ with_gil: false deprecated: false has_math_kernel: false +- name: logspace_out + operator_name: logspace + overload_name: Tensor_Tensor_out + manual_kernel_registration: false + category_override: factory + schema_string: aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - allocate: true + annotation: a! 
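`logspace` gains the same trio of tensor-endpoint overloads, keeping the `base=10.0` default from the scalar form. A sketch of the `Tensor_Scalar` variant (the endpoint values are illustrative):

```cpp
// Sketch: logspace with a tensor start and scalar end (logspace.Tensor_Scalar).
#include <ATen/ATen.h>

at::Tensor decades(const at::Tensor& start) {  // start: 0-dim
  return at::logspace(start, /*end=*/at::Scalar(3.0),
                      /*steps=*/4, /*base=*/10.0);
}
```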
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logspace_out + operator_name: logspace + overload_name: Tensor_Scalar_out + manual_kernel_registration: false + category_override: factory + schema_string: aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, int64_t, double, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logspace_out + operator_name: logspace + overload_name: Scalar_Tensor_out + manual_kernel_registration: false + category_override: factory + schema_string: aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, int64_t, double, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: log_softmax operator_name: log_softmax overload_name: int @@ -37558,12 +40914,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -37576,11 +40932,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -37623,12 +40979,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -37641,11 +40997,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
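And the corresponding `logspace.*_out` variants, completed just above; a sketch of the `Scalar_Tensor_out` form with a non-default base:

```cpp
// Sketch: logspace.Scalar_Tensor_out, base 2.
#include <ATen/ATen.h>

void fill_octaves(at::Tensor& out, const at::Tensor& end) {  // end: 0-dim
  at::logspace_out(out, /*start=*/at::Scalar(0.0), end,
                   /*steps=*/out.numel(), /*base=*/2.0);
}
```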
dynamic_type: at::Tensor @@ -37687,13 +41043,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -37706,12 +41062,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -39126,12 +42482,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -39139,7 +42495,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -39147,12 +42503,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -39211,12 +42567,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -39224,7 +42580,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, bool, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, ::std::optional, bool, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -39232,12 +42588,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -40975,6 +44331,107 @@ with_gil: false deprecated: false has_math_kernel: false +- name: quantized_max_pool3d + operator_name: quantized_max_pool3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: 
at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: max_pool3d operator_name: max_pool3d overload_name: '' @@ -41089,13 +44546,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -41103,12 +44560,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -41126,6 +44583,69 @@ with_gil: false deprecated: false has_math_kernel: false +- name: mean_out + operator_name: mean + overload_name: dtype_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: mean operator_name: mean overload_name: dim @@ -41151,13 +44671,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -41177,12 +44697,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -41232,13 +44752,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -41258,12 +44778,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -41312,13 +44832,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -41338,12 +44858,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -41393,13 +44913,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -41419,12 +44939,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -41461,7 +44981,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -41474,13 +44994,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -41488,7 +45008,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -41501,12 +45021,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -41544,7 +45064,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -41557,13 +45077,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null 
dynamic_type: at::Tensor @@ -41571,7 +45091,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -41584,12 +45104,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -42769,7 +46289,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor + schema_string: aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -42785,7 +46305,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -42806,7 +46326,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -42822,7 +46342,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -42864,7 +46384,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + schema_string: aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -42975,7 +46495,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor + schema_string: aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -42991,7 +46511,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -43012,7 +46532,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -43028,7 +46548,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -43320,17 +46840,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -43381,7 +46901,7 @@ is_nullable: false name: workspace type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, int64_t, int64_t, int64_t, bool, bool, bool, at::IntArrayRef, bool, const at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, int64_t, int64_t, int64_t, bool, bool, bool, at::IntArrayRef, bool, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -43437,17 +46957,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -43553,17 +47073,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -43579,7 +47099,7 @@ is_nullable: false name: epsilon type: double - schema_order_cpp_signature: 
::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -43595,17 +47115,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -43669,28 +47189,28 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false name: epsilon type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -43711,22 +47231,22 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -43759,7 +47279,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + schema_string: aten::miopen_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -43775,7 +47295,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -43806,7 +47326,7 @@ is_nullable: false name: deterministic type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -43822,7 +47342,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -43874,7 +47394,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + schema_string: aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -43890,7 +47410,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -43926,7 +47446,7 @@ is_nullable: false name: deterministic type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -43942,7 +47462,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -43999,7 +47519,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + schema_string: aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -44015,7 +47535,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -44046,7 +47566,7 @@ is_nullable: false name: deterministic type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -44062,7 +47582,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -44114,7 +47634,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + schema_string: aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -44130,7 +47650,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -44151,7 +47671,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -44167,7 +47687,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -44209,7 +47729,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + schema_string: aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -44230,12 +47750,12 @@ dynamic_type: const at::Scalar & is_nullable: true name: alpha - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -44256,7 +47776,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -44277,12 +47797,12 @@ dynamic_type: const at::Scalar & is_nullable: true name: alpha - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -44350,7 +47870,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -44395,8 +47915,8 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const ::std::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -44422,7 +47942,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -44467,7 +47987,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -44532,7 +48052,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -44542,17 +48062,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -44597,7 +48117,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ 
-44608,7 +48128,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, const at::Tensor &, ::std::array) + schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &, const at::Tensor &, ::std::array) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -44639,7 +48159,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -44649,17 +48169,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -44704,7 +48224,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -44845,6 +48365,275 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _int_mm + operator_name: _int_mm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_int_mm(Tensor self, Tensor mat2) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _int_mm_out + operator_name: _int_mm + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_int_mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _convert_weight_to_int4pack + operator_name: _convert_weight_to_int4pack + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_convert_weight_to_int4pack(Tensor self, int innerKTiles) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: innerKTiles + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: innerKTiles + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _weight_int4pack_mm + operator_name: _weight_int4pack_mm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_weight_int4pack_mm(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: qGroupSize + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: qScaleAndZeros + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: qGroupSize + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: qScaleAndZeros + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + 
name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _weight_int8pack_mm + operator_name: _weight_int8pack_mm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_weight_int8pack_mm(Tensor self, Tensor mat2, Tensor scales) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scales + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scales + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _sparse_mm operator_name: _sparse_mm overload_name: '' @@ -46251,7 +50040,7 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: false with_gil: false deprecated: false @@ -46338,22 +50127,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -46369,7 +50158,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -46380,22 +50169,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -46470,22 +50259,22 @@ dynamic_type: at::Tensor is_nullable: true name: 
weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -46501,7 +50290,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -46512,22 +50301,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -46602,12 +50391,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: a! dynamic_type: at::Tensor is_nullable: false @@ -46633,7 +50422,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &, at::Tensor &, bool, double, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &, at::Tensor &, bool, double, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -46644,12 +50433,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: a! dynamic_type: at::Tensor is_nullable: false @@ -46697,6 +50486,107 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _native_batch_norm_legit_no_training + operator_name: _native_batch_norm_legit_no_training + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? 
bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _native_batch_norm_legit_out operator_name: _native_batch_norm_legit overload_name: out @@ -46734,12 +50624,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: a! dynamic_type: at::Tensor is_nullable: false @@ -46765,7 +50655,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &, at::Tensor &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &, at::Tensor &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -46776,12 +50666,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: a! 
dynamic_type: at::Tensor is_nullable: false @@ -46866,12 +50756,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -46887,7 +50777,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, bool, double, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, bool, double, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -46898,12 +50788,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -46978,12 +50868,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -46999,7 +50889,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47010,12 +50900,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -47138,12 +51028,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -47159,7 +51049,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, double) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47170,12 +51060,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -47230,12 +51120,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & 
- annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -47251,7 +51141,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, double, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47262,12 +51152,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -47332,12 +51222,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -47353,7 +51243,7 @@ is_nullable: false name: count type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, double, double, int64_t) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, double, double, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47374,12 +51264,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -47440,12 +51330,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -47461,7 +51351,7 @@ is_nullable: false name: counts type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, double, double, const at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, double, double, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47482,12 +51372,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -47543,27 +51433,27 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & 
+ type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_invstd - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -47579,7 +51469,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, ::std::array) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, ::std::array) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47595,27 +51485,27 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_invstd - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -47684,7 +51574,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -47700,7 +51590,7 @@ is_nullable: false name: bias_g type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, bool, bool, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, bool, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47726,7 +51616,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -47772,7 +51662,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor + schema_string: aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? 
weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -47798,23 +51688,23 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy + name: sum_dy type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy_xmu + name: sum_dy_xmu type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false name: count type: const at::Tensor & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47840,16 +51730,16 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy + name: sum_dy type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy_xmu + name: sum_dy_xmu type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -47888,18 +51778,18 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false name: momentum type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47910,12 +51800,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -47995,7 +51885,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1) -> Tensor + schema_string: aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? 
bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -48011,7 +51901,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -48025,7 +51915,7 @@ name: stride size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -48041,7 +51931,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -48088,7 +51978,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -48096,7 +51986,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -48108,35 +51998,35 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -48172,7 +52062,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -48180,33 +52070,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt 
dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -48292,13 +52182,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -48306,40 +52196,40 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -48461,12 +52351,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: compute_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -48485,11 +52375,11 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: compute_mode - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -48577,8 +52467,8 @@ dynamic_type: int64_t is_nullable: true name: compute_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -48599,7 +52489,7 @@ dynamic_type: int64_t is_nullable: true name: compute_mode - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -49460,7 +53350,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::channel_shuffle(Tensor self, int groups) -> Tensor + schema_string: aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -49505,7 +53395,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: 
aten::native_channel_shuffle(Tensor self, int groups) -> Tensor + schema_string: aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -49558,12 +53448,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional - schema_order_cpp_signature: bool (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: bool (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -49571,11 +53461,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -49605,12 +53495,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -49618,11 +53508,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -49652,12 +53542,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -49665,11 +53555,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50080,7 +53970,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -50088,33 +53978,33 @@ name: s type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: 
::std::optional method_of: - Type - namespace @@ -50148,7 +54038,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -50156,7 +54046,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -50168,35 +54058,35 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50230,13 +54120,13 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -50244,7 +54134,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -50256,41 +54146,41 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50326,7 +54216,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, 
c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -50334,33 +54224,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50394,7 +54284,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -50402,7 +54292,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -50414,35 +54304,35 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50532,8 +54422,8 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -50545,7 +54435,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -50589,13 +54479,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -50603,40 +54493,40 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50658,7 +54548,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -50677,7 +54567,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -50695,28 +54585,28 @@ is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50738,7 +54628,7 @@ overload_name: generator manual_kernel_registration: false category_override: '' - schema_string: aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + schema_string: aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -50755,7 +54645,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: at::kLong dynamic_type: at::TensorOptions @@ -50763,7 +54653,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -50780,35 +54670,35 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: at::kLong dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50830,7 +54720,7 @@ overload_name: low manual_kernel_registration: false category_override: '' - schema_string: aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -50854,7 +54744,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -50877,28 +54767,28 @@ is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50920,7 +54810,7 @@ overload_name: low_generator manual_kernel_registration: false category_override: '' - schema_string: aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -50942,7 +54832,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: at::kLong dynamic_type: at::TensorOptions @@ -50950,7 +54840,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -50972,35 +54862,35 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: at::kLong dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -51022,7 +54912,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
+ schema_string: aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -51081,7 +54971,7 @@ overload_name: generator_out manual_kernel_registration: false category_override: '' - schema_string: aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -51105,8 +54995,8 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (int64_t, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (int64_t, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -51123,7 +55013,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -51152,7 +55042,7 @@ overload_name: low_out manual_kernel_registration: false category_override: '' - schema_string: aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -51221,7 +55111,7 @@ overload_name: low_generator_out manual_kernel_registration: false category_override: '' - schema_string: aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -51250,8 +55140,8 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (int64_t, int64_t, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (int64_t, int64_t, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -51273,7 +55163,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -51302,7 +55192,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + schema_string: aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -51322,13 +55212,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -51341,40 +55231,40 @@ name: high type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -51396,7 +55286,7 @@ overload_name: low_dtype manual_kernel_registration: false category_override: '' - schema_string: aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + schema_string: aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -51421,13 +55311,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -51445,40 +55335,40 @@ name: high type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -51514,7 +55404,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -51522,33 +55412,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -51582,7 +55472,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -51590,7 +55480,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, 
::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -51602,35 +55492,35 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -51664,7 +55554,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -51672,7 +55562,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -51684,35 +55574,35 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -51746,13 +55636,13 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -51760,7 +55650,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -51772,41 +55662,41 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional + type: 
::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -51896,8 +55786,8 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -51909,7 +55799,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -51953,13 +55843,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -51967,40 +55857,40 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -52022,7 +55912,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -52036,7 +55926,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -52049,28 +55939,28 @@ is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -52092,7 +55982,7 @@ overload_name: generator manual_kernel_registration: false category_override: '' - schema_string: aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -52104,7 +55994,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: at::kLong dynamic_type: at::TensorOptions @@ -52112,7 +56002,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -52124,35 +56014,35 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: at::kLong dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -52174,7 +56064,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -52223,7 +56113,7 @@ overload_name: generator_out manual_kernel_registration: false category_override: '' - schema_string: aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -52242,8 +56132,8 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -52255,7 +56145,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -52309,7 +56199,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -52328,33 +56218,33 @@ name: step type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -52395,7 +56285,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -52408,33 +56298,33 @@ name: end type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -53030,7 +56920,7 @@ 
overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor + schema_string: aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -53038,13 +56928,13 @@ name: repeats type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -53052,12 +56942,12 @@ name: repeats type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -53079,7 +56969,7 @@ overload_name: self_Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor + schema_string: aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -53092,19 +56982,19 @@ name: repeats type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -53117,18 +57007,18 @@ name: repeats type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -53151,7 +57041,7 @@ overload_name: self_int manual_kernel_registration: false category_override: '' - schema_string: aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor + schema_string: aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? 
output_size=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -53164,19 +57054,19 @@ name: repeats type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -53189,18 +57079,18 @@ name: repeats type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -53764,12 +57654,12 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -53795,11 +57685,11 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -53847,12 +57737,12 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &, bool, ::std::optional) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -53878,11 +57768,11 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -55810,12 +59700,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -55823,11 +59713,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -55858,12 +59748,12 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -55871,11 +59761,11 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -55913,12 +59803,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -55926,11 +59816,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -56480,227 +60370,132 @@ with_gil: false deprecated: false has_math_kernel: true -- name: slice - operator_name: slice - overload_name: Tensor +- name: sym_size + operator_name: sym_size + overload_name: int manual_kernel_registration: false category_override: '' - schema_string: aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1) -> Tensor(a) + schema_string: aten::sym_size.int(Tensor self, int dim) -> SymInt arguments: - - annotation: a + - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - annotation: null - default: 0 dynamic_type: int64_t is_nullable: false name: dim type: int64_t - - annotation: null - default: c10::nullopt - dynamic_type: int64_t - is_nullable: true - name: start - type: c10::optional - - annotation: null - default: c10::nullopt - dynamic_type: int64_t - is_nullable: true - name: end - type: c10::optional - - annotation: null - default: 1 - dynamic_type: int64_t - is_nullable: false - name: step - type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, int64_t) + schema_order_cpp_signature: c10::SymInt (const at::Tensor &, int64_t) schema_order_arguments: - - annotation: a + - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - annotation: null - default: 0 dynamic_type: int64_t is_nullable: false name: dim type: int64_t - - annotation: null - default: c10::nullopt - dynamic_type: int64_t - is_nullable: true - name: start - type: c10::optional - - annotation: null - default: c10::nullopt - dynamic_type: int64_t - is_nullable: true - name: end - type: c10::optional - - annotation: null - default: 1 - dynamic_type: int64_t - is_nullable: false - name: step - type: int64_t method_of: - Type - - Tensor - namespace mode: native python_module: '' returns: - - dynamic_type: at::Tensor + - dynamic_type: int64_t name: result - type: at::Tensor + type: c10::SymInt inplace: false is_factory_method: false - abstract: true + abstract: false device_guard: false with_gil: false deprecated: false - has_math_kernel: false -- name: slice_backward - operator_name: slice_backward + has_math_kernel: true +- name: sym_numel + operator_name: sym_numel overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor + schema_string: aten::sym_numel(Tensor self) -> SymInt arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: grad_output + name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: input_sizes - type: at::IntArrayRef - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: dim - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: start - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: end - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: step - type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, int64_t, int64_t, int64_t) + schema_order_cpp_signature: c10::SymInt (const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: grad_output + name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: input_sizes - type: at::IntArrayRef - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: dim - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: start - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: end - type: 
int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: step - type: int64_t method_of: - Type - namespace mode: native python_module: '' returns: - - dynamic_type: at::Tensor + - dynamic_type: int64_t name: result - type: at::Tensor + type: c10::SymInt inplace: false is_factory_method: false - abstract: true + abstract: false device_guard: false with_gil: false deprecated: false - has_math_kernel: false -- name: slice_scatter - operator_name: slice_scatter + has_math_kernel: true +- name: sym_storage_offset + operator_name: sym_storage_offset overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor + schema_string: aten::sym_storage_offset(Tensor self) -> SymInt arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: src - type: const at::Tensor & - - annotation: null - default: 0 - dynamic_type: int64_t - is_nullable: false - name: dim - type: int64_t - - annotation: null - default: c10::nullopt - dynamic_type: int64_t - is_nullable: true - name: start - type: c10::optional - - annotation: null - default: c10::nullopt - dynamic_type: int64_t - is_nullable: true - name: end - type: c10::optional - - annotation: null - default: 1 - dynamic_type: int64_t - is_nullable: false - name: step - type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, c10::optional, c10::optional, int64_t) + schema_order_cpp_signature: c10::SymInt (const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - annotation: null + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: c10::SymInt + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: slice + operator_name: slice + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1) -> Tensor(a) + arguments: + - annotation: a dynamic_type: at::Tensor is_nullable: false - name: src + name: self type: const at::Tensor & - annotation: null default: 0 @@ -56709,17 +60504,321 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: start - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: end - type: c10::optional + type: ::std::optional + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: start + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: end + type: ::std::optional + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: slice_backward + operator_name: slice_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: end + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: end + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + 
abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: slice_inverse + operator_name: slice_inverse + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::slice_inverse(Tensor(a) self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: start + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: end + type: ::std::optional + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, ::std::optional, ::std::optional, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: start + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: end + type: ::std::optional + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: slice_scatter + operator_name: slice_scatter + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: start + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: end + type: ::std::optional + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, ::std::optional, ::std::optional, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: start + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: end + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -56919,12 +61018,12 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -56947,11 +61046,11 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -57033,12 +61132,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -57051,11 +61150,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -57098,12 +61197,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null 
dynamic_type: at::Tensor @@ -57116,11 +61215,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -57162,13 +61261,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -57181,12 +61280,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -58563,6 +62662,130 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _chunk_cat + operator_name: _chunk_cat + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_chunk_cat(Tensor[] tensors, int dim, int num_chunks) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_chunks + type: int64_t + schema_order_cpp_signature: at::Tensor (at::TensorList, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_chunks + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _chunk_cat_out + operator_name: _chunk_cat + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_chunks + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::TensorList, int64_t, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_chunks + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: stack operator_name: stack overload_name: '' @@ -59049,23 +63272,23 @@ name: n_fft type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: hop_length - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: win_length - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: window - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -59073,18 +63296,18 @@ name: normalized type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: onesided - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: return_complex - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, const c10::optional &, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional, const ::std::optional &, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -59097,23 +63320,23 @@ name: n_fft type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: hop_length - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: win_length - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: window - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -59121,17 +63344,17 @@ name: normalized type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: onesided - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: return_complex - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -59167,23 +63390,23 @@ name: n_fft type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: hop_length - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: win_length - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: window - type: const c10::optional & + type: const ::std::optional & - annotation: null default: true dynamic_type: bool @@ -59203,18 +63426,18 @@ name: normalized type: bool - annotation: null - default: c10::nullopt + 
default: ::std::nullopt dynamic_type: bool is_nullable: true name: onesided - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: return_complex - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, const c10::optional &, bool, c10::string_view, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional, const ::std::optional &, bool, c10::string_view, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -59227,23 +63450,23 @@ name: n_fft type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: hop_length - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: win_length - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: window - type: const c10::optional & + type: const ::std::optional & - annotation: null default: true dynamic_type: bool @@ -59263,17 +63486,17 @@ name: normalized type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: onesided - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: return_complex - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -59309,23 +63532,23 @@ name: n_fft type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: hop_length - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: win_length - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: window - type: const c10::optional & + type: const ::std::optional & - annotation: null default: true dynamic_type: bool @@ -59339,24 +63562,24 @@ name: normalized type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: onesided - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: length - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false name: return_complex type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, const c10::optional &, bool, bool, c10::optional, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional, const ::std::optional &, bool, bool, ::std::optional, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -59369,23 +63592,23 @@ name: n_fft type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: hop_length - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: 
::std::nullopt dynamic_type: int64_t is_nullable: true name: win_length - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: window - type: const c10::optional & + type: const ::std::optional & - annotation: null default: true dynamic_type: bool @@ -59399,17 +63622,17 @@ name: normalized type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: onesided - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: length - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -59524,12 +63747,12 @@ with_gil: false deprecated: false has_math_kernel: true -- name: sum - operator_name: sum - overload_name: '' +- name: sym_stride + operator_name: sym_stride + overload_name: int manual_kernel_registration: false category_override: '' - schema_string: aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor + schema_string: aten::sym_stride.int(Tensor self, int dim) -> SymInt arguments: - annotation: null dynamic_type: at::Tensor @@ -59537,75 +63760,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional - method_of: - - Type - - Tensor - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: result - type: at::Tensor - inplace: false - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: sum - operator_name: sum - overload_name: dim_IntList - manual_kernel_registration: false - category_override: '' - schema_string: aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor - arguments: - - annotation: null - dynamic_type: at::Tensor + dynamic_type: int64_t is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: true name: dim - size: 1 - type: at::OptionalIntArrayRef - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: keepdim - type: bool - - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional) + type: int64_t + schema_order_cpp_signature: c10::SymInt (const at::Tensor &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -59613,47 +63772,32 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: true - name: dim - size: 1 - type: at::OptionalIntArrayRef - - annotation: null - default: false - dynamic_type: bool + dynamic_type: int64_t is_nullable: false - name: keepdim - type: bool - - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional + name: dim + type: int64_t method_of: - Type - - Tensor - namespace mode: native python_module: '' returns: - - dynamic_type: at::Tensor + - dynamic_type: int64_t name: result - type: at::Tensor + type: c10::SymInt inplace: false is_factory_method: false - abstract: true - device_guard: true + abstract: false + device_guard: false with_gil: false deprecated: false - has_math_kernel: false + has_math_kernel: true - name: sum operator_name: sum - overload_name: dim_DimnameList + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + schema_string: aten::sum(Tensor self, *, ScalarType? 
dtype=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -59661,25 +63805,13 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: at::DimnameList - is_nullable: false - name: dim - size: 1 - type: at::DimnameList - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: keepdim - type: bool - - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -59687,24 +63819,12 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: at::DimnameList - is_nullable: false - name: dim - size: 1 - type: at::DimnameList - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: keepdim - type: bool - - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -59717,25 +63837,18 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true -- name: sum_out + has_math_kernel: false +- name: sum operator_name: sum - overload_name: IntList_out + overload_name: dim_IntList manual_kernel_registration: false category_override: '' - schema_string: aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -59754,13 +63867,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -59780,28 +63893,22 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - - allocate: true - annotation: a! 
- dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & + type: ::std::optional method_of: - Type + - Tensor - namespace mode: native python_module: '' returns: - dynamic_type: at::Tensor - name: out - type: at::Tensor & + name: result + type: at::Tensor inplace: false is_factory_method: false abstract: true @@ -59809,20 +63916,13 @@ with_gil: false deprecated: false has_math_kernel: false -- name: sum_out +- name: sum operator_name: sum - overload_name: DimnameList_out + overload_name: dim_DimnameList manual_kernel_registration: false category_override: '' - schema_string: aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -59841,13 +63941,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -59867,12 +63967,180 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: sum_out + operator_name: sum + overload_name: IntList_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: at::OptionalIntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: at::OptionalIntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sum_out + operator_name: sum + overload_name: DimnameList_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, bool, ::std::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -59978,7 +64246,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -59991,13 +64259,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -60005,7 +64273,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -60018,12 +64286,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -60061,7 +64329,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -60074,13 +64342,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -60088,7 +64356,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -60101,12 +64369,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -60135,7 +64403,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::sum_to_size(Tensor self, int[] size) -> Tensor + schema_string: aten::sum_to_size(Tensor self, SymInt[] size) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -60542,7 +64810,7 @@ overload_name: correction manual_kernel_registration: false category_override: '' - schema_string: aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor + schema_string: aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? 
correction=None, bool keepdim=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -60550,19 +64818,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -60570,7 +64838,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -60578,19 +64846,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -60744,7 +65012,7 @@ overload_name: correction manual_kernel_registration: false category_override: '' - schema_string: aten::std_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor) + schema_string: aten::std_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -60752,19 +65020,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -60772,7 +65040,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -60780,19 +65048,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -60898,7 +65166,7 @@ overload_name: correction_names manual_kernel_registration: false category_override: '' - schema_string: aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? 
correction=None, bool keepdim=False) -> (Tensor, Tensor) + schema_string: aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -60912,12 +65180,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -60925,7 +65193,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::DimnameList, c10::optional, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::DimnameList, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -60939,12 +65207,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -61061,7 +65329,7 @@ overload_name: correction_out manual_kernel_registration: false category_override: '' - schema_string: aten::std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -61076,19 +65344,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -61096,7 +65364,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61104,19 +65372,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -61309,7 +65577,7 @@ overload_name: correction_names manual_kernel_registration: false category_override: '' - schema_string: aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor + schema_string: aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? 
correction=None, bool keepdim=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -61323,12 +65591,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -61336,7 +65604,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61350,12 +65618,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -61385,7 +65653,7 @@ overload_name: correction_names_out manual_kernel_registration: false category_override: '' - schema_string: aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -61406,12 +65674,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -61419,7 +65687,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61433,12 +65701,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -61482,13 +65750,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61496,12 +65764,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -61543,13 +65811,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: 
at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61568,12 +65836,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -61622,13 +65890,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61647,12 +65915,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -61700,13 +65968,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61725,12 +65993,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -61779,13 +66047,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61804,12 +66072,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -62285,11 +66553,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: true + abstract: false device_guard: true with_gil: false deprecated: false - has_math_kernel: false + has_math_kernel: true - name: threshold operator_name: threshold overload_name: '' @@ -62598,7 +66866,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::tile(Tensor self, int[] dims) -> Tensor + schema_string: aten::tile(Tensor self, SymInt[] dims) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -63086,7 +67354,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor + schema_string: aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -63737,19 +68005,19 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _nested_tensor_offsets - operator_name: _nested_tensor_offsets +- name: _nested_tensor_storage_offsets + operator_name: _nested_tensor_storage_offsets overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_nested_tensor_offsets(Tensor self) -> int[] + schema_string: aten::_nested_tensor_storage_offsets(Tensor self) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - schema_order_cpp_signature: ::std::vector (const at::Tensor &) + schema_order_cpp_signature: at::Tensor (const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -63762,9 +68030,9 @@ mode: native python_module: '' returns: - - dynamic_type: at::IntArrayRef + - dynamic_type: at::Tensor name: result - type: ::std::vector + type: at::Tensor inplace: false is_factory_method: false abstract: true @@ -63822,7 +68090,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor(a) + schema_string: aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor(a) arguments: - annotation: a dynamic_type: at::Tensor @@ -63840,11 +68108,11 @@ name: nested_strides type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false name: offsets - type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef) + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -63862,10 +68130,10 @@ name: nested_strides type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false name: offsets - type: at::IntArrayRef + type: const at::Tensor & method_of: - Type - namespace @@ -63887,7 +68155,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor + schema_string: aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor arguments: - annotation: null dynamic_type: 
at::Tensor @@ -63905,11 +68173,11 @@ name: nested_strides type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false name: offsets - type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef) + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -63927,10 +68195,10 @@ name: nested_strides type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false name: offsets - type: at::IntArrayRef + type: const at::Tensor & method_of: - Type - namespace @@ -63947,55 +68215,579 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _trilinear - operator_name: _trilinear +- name: _nested_view_from_jagged + operator_name: _nested_view_from_jagged overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor + schema_string: aten::_nested_view_from_jagged(Tensor(a) self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None) -> Tensor(a) arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: i1 + name: offsets type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: i2 + name: dummy type: const at::Tensor & - annotation: null + default: '{}' dynamic_type: at::Tensor + is_nullable: true + name: lengths + type: const ::std::optional & + - annotation: null + default: 1 + dynamic_type: int64_t is_nullable: false - name: i3 + name: ragged_idx + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min_seqlen + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max_seqlen + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false - name: expand1 - type: at::IntArrayRef + name: offsets + type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false - name: expand2 - type: at::IntArrayRef + name: dummy + type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: lengths + type: const ::std::optional & + - annotation: null + default: 1 + dynamic_type: int64_t is_nullable: false - name: expand3 - type: at::IntArrayRef + name: ragged_idx + type: int64_t - annotation: null - dynamic_type: at::IntArrayRef + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min_seqlen + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true 
+ name: max_seqlen + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_view_from_jagged_copy + operator_name: _nested_view_from_jagged_copy + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_view_from_jagged_copy(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor is_nullable: false - name: sumdim - type: at::IntArrayRef + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dummy + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: lengths + type: const ::std::optional & - annotation: null default: 1 dynamic_type: int64_t is_nullable: false - name: unroll_dim + name: ragged_idx type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min_seqlen + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max_seqlen + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dummy + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: lengths + type: const ::std::optional & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: ragged_idx + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min_seqlen + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max_seqlen + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_values + operator_name: _nested_get_values + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_values(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + 
schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_values_copy + operator_name: _nested_get_values_copy + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_values_copy(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_offsets + operator_name: _nested_get_offsets + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_offsets(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_lengths + operator_name: _nested_get_lengths + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_lengths(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_ragged_idx + operator_name: _nested_get_ragged_idx + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_ragged_idx(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: int64_t (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + 
type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_min_seqlen + operator_name: _nested_get_min_seqlen + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_min_seqlen(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_max_seqlen + operator_name: _nested_get_max_seqlen + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_max_seqlen(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_jagged_dummy + operator_name: _nested_get_jagged_dummy + overload_name: '' + manual_kernel_registration: false + category_override: dummy + schema_string: aten::_nested_get_jagged_dummy(Tensor any) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: any + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: any + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_compute_contiguous_strides_offsets + operator_name: _nested_compute_contiguous_strides_offsets + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_compute_contiguous_strides_offsets(Tensor nested_size) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: nested_size + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: nested_size + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: 
true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _trilinear + operator_name: _trilinear + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i3 + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sumdim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: unroll_dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -64675,12 +69467,12 @@ name: return_counts type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -64700,11 +69492,11 @@ name: return_counts type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -65028,18 +69820,18 @@ name: x type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: N - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false name: increasing type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -65047,11 +69839,11 @@ name: x type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: N - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -65199,7 +69991,7 @@ overload_name: correction manual_kernel_registration: false category_override: '' - schema_string: aten::var.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor + schema_string: aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? 
correction=None, bool keepdim=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -65207,19 +69999,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65227,7 +70019,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -65235,19 +70027,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65362,7 +70154,7 @@ overload_name: correction_out manual_kernel_registration: false category_override: '' - schema_string: aten::var.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -65377,19 +70169,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65397,7 +70189,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -65405,19 +70197,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65610,7 +70402,7 @@ overload_name: correction_names manual_kernel_registration: false category_override: '' - schema_string: aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? 
correction=None, bool keepdim=False) -> Tensor + schema_string: aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -65624,12 +70416,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65637,7 +70429,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -65651,12 +70443,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65686,7 +70478,7 @@ overload_name: correction_names_out manual_kernel_registration: false category_override: '' - schema_string: aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -65707,12 +70499,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65720,7 +70512,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -65734,12 +70526,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65899,7 +70691,7 @@ overload_name: correction manual_kernel_registration: false category_override: '' - schema_string: aten::var_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor) + schema_string: aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? 
correction=None, bool keepdim=False) -> (Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -65907,19 +70699,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65927,7 +70719,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -65935,19 +70727,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -66053,7 +70845,7 @@ overload_name: correction_names manual_kernel_registration: false category_override: '' - schema_string: aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor) + schema_string: aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? 
correction=None, bool keepdim=False) -> (Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -66067,12 +70859,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -66080,7 +70872,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::DimnameList, c10::optional, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::DimnameList, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -66094,12 +70886,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -66846,7 +71638,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -66854,7 +71646,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -66866,35 +71658,35 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -66916,7 +71708,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -66930,7 +71722,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -66938,33 +71730,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67000,7 +71792,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -67008,33 +71800,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67120,13 +71912,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67134,40 +71926,40 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true 
kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67242,12 +72034,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67255,11 +72047,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67344,12 +72136,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67357,11 +72149,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67391,12 +72183,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67404,11 +72196,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67443,12 +72235,12 @@ name: prob type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67461,11 +72253,11 @@ name: prob type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: 
true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67545,7 +72337,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -67561,8 +72353,8 @@ dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, at::IntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, at::IntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67573,7 +72365,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -67589,7 +72381,7 @@ dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67606,6 +72398,515 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _batch_norm_with_update + operator_name: _batch_norm_with_update + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_batch_norm_with_update(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &, at::Tensor &, double, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: at::Tensor & + - annotation: b! 
+ dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _batch_norm_with_update_out + operator_name: _batch_norm_with_update + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_batch_norm_with_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd, Tensor(g!) reserve) -> (Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!)) + arguments: + - allocate: true + annotation: d! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - allocate: true + annotation: e! + dynamic_type: at::Tensor + is_nullable: false + name: save_mean + output: true + type: at::Tensor & + - allocate: true + annotation: f! + dynamic_type: at::Tensor + is_nullable: false + name: save_invstd + output: true + type: at::Tensor & + - allocate: true + annotation: g! + dynamic_type: at::Tensor + is_nullable: false + name: reserve + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &, at::Tensor &, double, double, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - allocate: true + annotation: d! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - allocate: true + annotation: e! + dynamic_type: at::Tensor + is_nullable: false + name: save_mean + output: true + type: at::Tensor & + - allocate: true + annotation: f! + dynamic_type: at::Tensor + is_nullable: false + name: save_invstd + output: true + type: at::Tensor & + - allocate: true + annotation: g! + dynamic_type: at::Tensor + is_nullable: false + name: reserve + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + - dynamic_type: at::Tensor + name: save_mean + type: at::Tensor & + - dynamic_type: at::Tensor + name: save_invstd + type: at::Tensor & + - dynamic_type: at::Tensor + name: reserve + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _batch_norm_no_update + operator_name: _batch_norm_no_update + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_batch_norm_no_update(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const ::std::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, double, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const ::std::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: 
false + has_math_kernel: false +- name: batch_norm_backward + operator_name: batch_norm_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::batch_norm_backward(Tensor grad_out, Tensor input, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, bool update, float eps, bool[3] output_mask, Tensor reserve) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_var + type: const ::std::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: update + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: reserve + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, ::std::array, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_var + type: const ::std::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: update + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: reserve + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false 
+ has_math_kernel: false - name: _sparse_sum operator_name: _sparse_sum overload_name: '' @@ -67874,13 +73175,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67900,12 +73201,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67947,13 +73248,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67973,12 +73274,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -68013,12 +73314,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68031,11 +73332,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -68070,13 +73371,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68089,12 +73390,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -68249,12 +73550,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) 
schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68267,11 +73568,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -68306,13 +73607,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68325,12 +73626,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -68490,12 +73791,12 @@ name: shape type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true name: layout - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68513,11 +73814,11 @@ name: shape type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true name: layout - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -68550,14 +73851,14 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::ScalarType is_nullable: false kwarg_only: true name: dtype type: at::ScalarType - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, at::ScalarType) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, at::ScalarType) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68568,7 +73869,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::ScalarType is_nullable: false @@ -68656,7 +73957,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68674,7 +73975,7 @@ kwarg_only: true name: dtype type: at::ScalarType - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, at::IntArrayRef, bool, at::ScalarType) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, at::IntArrayRef, bool, at::ScalarType) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68685,7 +73986,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68736,7 +74037,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const 
c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68749,7 +74050,7 @@ is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, at::IntArrayRef, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, at::IntArrayRef, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68760,7 +74061,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68813,7 +74114,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68831,7 +74132,7 @@ kwarg_only: true name: dtype type: at::ScalarType - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::IntArrayRef, bool, at::ScalarType, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::IntArrayRef, bool, at::ScalarType, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68842,7 +74143,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68906,7 +74207,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68919,7 +74220,7 @@ is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::IntArrayRef, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::IntArrayRef, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68930,7 +74231,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68982,7 +74283,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69000,7 +74301,7 @@ kwarg_only: true name: dtype type: at::ScalarType - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, at::DimnameList, bool, at::ScalarType) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, at::DimnameList, bool, at::ScalarType) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -69011,7 +74312,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69062,7 +74363,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69075,7 +74376,7 @@ is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, at::DimnameList, bool) + 
schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, at::DimnameList, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -69086,7 +74387,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69139,7 +74440,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69157,7 +74458,7 @@ kwarg_only: true name: dtype type: at::ScalarType - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::DimnameList, bool, at::ScalarType, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::DimnameList, bool, at::ScalarType, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -69168,7 +74469,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69232,7 +74533,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69245,7 +74546,7 @@ is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::DimnameList, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::DimnameList, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -69256,7 +74557,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69790,13 +75091,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -69804,12 +75105,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -69881,13 +75182,13 @@ name: the_template type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -69900,12 +75201,12 @@ name: the_template type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -71767,89 +77068,103 @@ with_gil: false deprecated: false has_math_kernel: false -- name: sparse_compressed_tensor - operator_name: sparse_compressed_tensor - overload_name: comp_plain_value_size +- name: _scaled_mm + operator_name: _scaled_mm + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::_scaled_mm(Tensor self, Tensor mat2, Tensor scale_a, Tensor scale_b, Tensor? bias=None, Tensor? scale_result=None, ScalarType? out_dtype=None, bool use_fast_accum=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: compressed_indices + name: self type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: plain_indices + name: mat2 type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: values + name: scale_a type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false - name: size - type: at::IntArrayRef + name: scale_b + type: const at::Tensor & - annotation: null - dynamic_type: at::TensorOptions + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: scale_result + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: out_dtype + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool is_nullable: false - kwarg_only: true - name: options - type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + name: use_fast_accum + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: compressed_indices + name: self type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: plain_indices + name: mat2 type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: values + name: scale_a type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false - name: size - type: at::IntArrayRef + name: scale_b + type: const at::Tensor & - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType + default: '{}' + dynamic_type: at::Tensor is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional + name: bias + type: const ::std::optional & - annotation: null - default: c10::nullopt - dynamic_type: at::Layout + default: '{}' + dynamic_type: 
at::Tensor is_nullable: true - kwarg_only: true - name: layout - type: c10::optional + name: scale_result + type: const ::std::optional & - annotation: null - default: c10::nullopt - dynamic_type: at::Device + default: ::std::nullopt + dynamic_type: at::ScalarType is_nullable: true - kwarg_only: true - name: device - type: c10::optional + name: out_dtype + type: ::std::optional - annotation: null default: false dynamic_type: bool - is_nullable: true - kwarg_only: true - name: pin_memory - type: c10::optional + is_nullable: false + name: use_fast_accum + type: bool method_of: - Type - namespace @@ -71860,95 +77175,123 @@ name: result type: at::Tensor inplace: false - is_factory_method: true - abstract: false + is_factory_method: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true -- name: sparse_csr_tensor - operator_name: sparse_csr_tensor - overload_name: crow_col_value_size + has_math_kernel: false +- name: _scaled_mm_out + operator_name: _scaled_mm + overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::_scaled_mm.out(Tensor self, Tensor mat2, Tensor scale_a, Tensor scale_b, Tensor? bias=None, Tensor? scale_result=None, ScalarType? out_dtype=None, bool use_fast_accum=False, *, Tensor(a!) out) -> Tensor(a!) arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: crow_indices + name: self type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: col_indices + name: mat2 type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: values + name: scale_a type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false - name: size - type: at::IntArrayRef + name: scale_b + type: const at::Tensor & - annotation: null - dynamic_type: at::TensorOptions + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: scale_result + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: out_dtype + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool is_nullable: false - kwarg_only: true - name: options - type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + name: use_fast_accum + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, ::std::optional, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: crow_indices + name: self type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: col_indices + name: mat2 type: const at::Tensor & - annotation: null dynamic_type: at::Tensor 
is_nullable: false - name: values + name: scale_a type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false - name: size - type: at::IntArrayRef + name: scale_b + type: const at::Tensor & - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType + default: '{}' + dynamic_type: at::Tensor is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional + name: bias + type: const ::std::optional & - annotation: null - default: c10::nullopt - dynamic_type: at::Layout + default: '{}' + dynamic_type: at::Tensor is_nullable: true - kwarg_only: true - name: layout - type: c10::optional + name: scale_result + type: const ::std::optional & - annotation: null - default: c10::nullopt - dynamic_type: at::Device + default: ::std::nullopt + dynamic_type: at::ScalarType is_nullable: true - kwarg_only: true - name: device - type: c10::optional + name: out_dtype + type: ::std::optional - annotation: null default: false dynamic_type: bool - is_nullable: true - kwarg_only: true - name: pin_memory - type: c10::optional + is_nullable: false + name: use_fast_accum + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & method_of: - Type - namespace @@ -71956,98 +77299,108 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: result - type: at::Tensor + name: out + type: at::Tensor & inplace: false - is_factory_method: true - abstract: false + is_factory_method: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true -- name: sparse_csc_tensor - operator_name: sparse_csc_tensor - overload_name: ccol_row_value_size + has_math_kernel: false +- name: _sparse_compressed_tensor_with_dims + operator_name: _sparse_compressed_tensor_with_dims + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::_sparse_compressed_tensor_with_dims(int nnz, int dense_dim, int[] size, int[] blocksize, ScalarType index_dtype, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null - dynamic_type: at::Tensor + dynamic_type: int64_t is_nullable: false - name: ccol_indices - type: const at::Tensor & + name: nnz + type: int64_t - annotation: null - dynamic_type: at::Tensor + dynamic_type: int64_t is_nullable: false - name: row_indices - type: const at::Tensor & + name: dense_dim + type: int64_t - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::IntArrayRef is_nullable: false - name: values - type: const at::Tensor & + name: size + type: at::IntArrayRef - annotation: null dynamic_type: at::IntArrayRef is_nullable: false - name: size + name: blocksize type: at::IntArrayRef + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: index_dtype + type: at::ScalarType - annotation: null dynamic_type: at::TensorOptions is_nullable: false kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, at::IntArrayRef, at::ScalarType, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null - dynamic_type: at::Tensor + dynamic_type: int64_t is_nullable: false - name: ccol_indices - type: const at::Tensor & + name: nnz + type: int64_t - annotation: null - dynamic_type: at::Tensor + dynamic_type: int64_t is_nullable: false - name: row_indices - type: const at::Tensor & + name: dense_dim + type: int64_t - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::IntArrayRef is_nullable: false - name: values - type: const at::Tensor & + name: size + type: at::IntArrayRef - annotation: null dynamic_type: at::IntArrayRef is_nullable: false - name: size + name: blocksize type: at::IntArrayRef - annotation: null - default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: false + name: index_dtype + type: at::ScalarType + - annotation: null + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72059,27 +77412,27 @@ type: at::Tensor inplace: false is_factory_method: true - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true -- name: sparse_bsr_tensor - operator_name: sparse_bsr_tensor - overload_name: crow_col_value_size + has_math_kernel: false +- name: sparse_compressed_tensor + operator_name: sparse_compressed_tensor + overload_name: comp_plain_value_size manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + schema_string: aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: crow_indices + name: compressed_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: col_indices + name: plain_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72097,17 +77450,17 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: crow_indices + name: compressed_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: col_indices + name: plain_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72120,33 +77473,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72158,27 +77511,27 @@ type: at::Tensor inplace: false is_factory_method: true - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true -- name: sparse_bsc_tensor - operator_name: sparse_bsc_tensor - overload_name: ccol_row_value_size + has_math_kernel: false +- name: sparse_csr_tensor + operator_name: sparse_csr_tensor + overload_name: crow_col_value_size manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ccol_indices + name: crow_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: row_indices + name: col_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72196,17 +77549,17 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ccol_indices + name: crow_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: row_indices + name: col_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72219,33 +77572,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72262,45 +77615,50 @@ with_gil: false deprecated: false has_math_kernel: true -- name: sparse_compressed_tensor - operator_name: sparse_compressed_tensor - overload_name: comp_plain_value +- name: sparse_csc_tensor + operator_name: sparse_csc_tensor + overload_name: ccol_row_value_size manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: compressed_indices + name: ccol_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: plain_indices + name: row_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false name: values type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef - annotation: null dynamic_type: at::TensorOptions is_nullable: false kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: compressed_indices + name: ccol_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: plain_indices + name: row_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72308,33 +77666,38 @@ name: values type: const at::Tensor & - annotation: null - default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72351,12 +77714,12 @@ with_gil: false deprecated: false has_math_kernel: true -- name: sparse_csr_tensor - operator_name: sparse_csr_tensor - overload_name: crow_col_value +- name: sparse_bsr_tensor + operator_name: sparse_bsr_tensor + overload_name: crow_col_value_size manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -72373,13 +77736,18 @@ is_nullable: false name: values type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef - annotation: null dynamic_type: at::TensorOptions is_nullable: false kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -72397,33 +77765,38 @@ name: values type: const at::Tensor & - annotation: null - default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72440,12 +77813,12 @@ with_gil: false deprecated: false has_math_kernel: true -- name: sparse_csc_tensor - operator_name: sparse_csc_tensor - overload_name: ccol_row_value +- name: sparse_bsc_tensor + operator_name: sparse_bsc_tensor + overload_name: ccol_row_value_size manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -72462,13 +77835,18 @@ is_nullable: false name: values type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef - annotation: null dynamic_type: at::TensorOptions is_nullable: false kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -72486,33 +77864,38 @@ name: values type: const at::Tensor & - annotation: null - default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72529,12 +77912,101 @@ with_gil: false deprecated: false has_math_kernel: true -- name: sparse_bsr_tensor - operator_name: sparse_bsr_tensor +- name: sparse_compressed_tensor + operator_name: sparse_compressed_tensor + overload_name: comp_plain_value + manual_kernel_registration: false + category_override: '' + schema_string: aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: compressed_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: plain_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: compressed_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: plain_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sparse_csr_tensor + operator_name: sparse_csr_tensor overload_name: crow_col_value manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -72557,7 +78029,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -72575,33 +78047,33 @@ name: values type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72618,12 +78090,12 @@ with_gil: false deprecated: false has_math_kernel: true -- name: sparse_bsc_tensor - operator_name: sparse_bsc_tensor +- name: sparse_csc_tensor + operator_name: sparse_csc_tensor overload_name: ccol_row_value manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -72646,7 +78118,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -72664,33 +78136,33 @@ name: values type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72707,22 +78179,22 @@ with_gil: false deprecated: false has_math_kernel: true -- name: _sparse_compressed_tensor_unsafe - operator_name: _sparse_compressed_tensor_unsafe - overload_name: '' +- name: sparse_bsr_tensor + operator_name: sparse_bsr_tensor + overload_name: crow_col_value manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: compressed_indices + name: crow_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: plain_indices + name: col_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72730,28 +78202,22 @@ name: values type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: size - type: at::IntArrayRef - - annotation: null - default: '{}' dynamic_type: at::TensorOptions is_nullable: false kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: compressed_indices + name: crow_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: plain_indices + name: col_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72759,38 +78225,122 @@ name: values type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: sparse_bsc_tensor + operator_name: sparse_bsc_tensor + overload_name: ccol_row_value + manual_kernel_registration: false + category_override: '' + schema_string: aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor is_nullable: false - name: size - type: at::IntArrayRef + name: ccol_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: row_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: ccol_indices + type: const at::Tensor & - annotation: null - default: c10::nullopt + dynamic_type: at::Tensor + is_nullable: false + name: row_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72807,22 +78357,22 @@ with_gil: false deprecated: false has_math_kernel: true -- name: _sparse_csr_tensor_unsafe - operator_name: _sparse_csr_tensor_unsafe +- name: _sparse_compressed_tensor_unsafe + operator_name: _sparse_compressed_tensor_unsafe overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: crow_indices + name: compressed_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: col_indices + name: plain_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72841,17 +78391,17 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: crow_indices + name: compressed_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: col_indices + name: plain_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72864,33 +78414,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72907,22 +78457,22 @@ with_gil: false deprecated: false has_math_kernel: true -- name: _sparse_csc_tensor_unsafe - operator_name: _sparse_csc_tensor_unsafe +- name: _sparse_csr_tensor_unsafe + operator_name: _sparse_csr_tensor_unsafe overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ccol_indices + name: crow_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: row_indices + name: col_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72941,17 +78491,17 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ccol_indices + name: crow_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: row_indices + name: col_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72964,33 +78514,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -73007,22 +78557,22 @@ with_gil: false deprecated: false has_math_kernel: true -- name: _sparse_bsr_tensor_unsafe - operator_name: _sparse_bsr_tensor_unsafe +- name: _sparse_csc_tensor_unsafe + operator_name: _sparse_csc_tensor_unsafe overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: crow_indices + name: ccol_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: col_indices + name: row_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -73041,8 +78591,79 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: ccol_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: row_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _sparse_bsr_tensor_unsafe + operator_name: _sparse_bsr_tensor_unsafe + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -73064,33 +78685,62 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: crow_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: col_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -73141,7 +78791,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -73164,33 +78814,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -73225,7 +78875,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -73233,33 +78883,33 @@ name: 
size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -73281,7 +78931,7 @@ overload_name: indices manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -73300,7 +78950,14 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -73313,33 +78970,40 @@ name: values type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional method_of: - Type - namespace @@ -73361,7 +79025,7 @@ overload_name: indices_size manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? 
is_coalesced=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -73385,7 +79049,14 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -73403,33 +79074,40 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional method_of: - Type - namespace @@ -73451,7 +79129,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? 
is_coalesced=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -73475,7 +79153,14 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -73493,33 +79178,40 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional method_of: - Type - namespace @@ -73541,7 +79233,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> () + schema_string: aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size, bool? 
is_coalesced=None) -> () arguments: - annotation: null dynamic_type: at::Tensor @@ -73558,7 +79250,13 @@ is_nullable: false name: size type: at::IntArrayRef - schema_order_cpp_signature: void (const at::Tensor &, const at::Tensor &, at::IntArrayRef) + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: is_coalesced + type: ::std::optional + schema_order_cpp_signature: void (const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -73575,6 +79273,12 @@ is_nullable: false name: size type: at::IntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: is_coalesced + type: ::std::optional method_of: - Type - namespace @@ -73936,7 +79640,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -73954,33 +79658,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -74002,7 +79706,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? 
is_coalesced=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -74035,7 +79739,14 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -74063,33 +79774,40 @@ name: values type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional method_of: - Type - namespace @@ -74281,6 +79999,63 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _sparse_mask_projection + operator_name: _sparse_mask_projection + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate_matches + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate_matches + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _to_cpu operator_name: _to_cpu overload_name: '' @@ -74321,7 +80096,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::to_dense(Tensor self, ScalarType? dtype=None) -> Tensor + schema_string: aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? 
masked_grad=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -74329,12 +80104,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: masked_grad + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -74342,11 +80124,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: masked_grad + type: ::std::optional method_of: - Type - Tensor @@ -74368,7 +80157,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_to_dense(Tensor self, ScalarType? dtype=None) -> Tensor + schema_string: aten::_to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -74376,12 +80165,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: masked_grad + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -74389,11 +80184,17 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: masked_grad + type: ::std::optional method_of: - Type - Tensor @@ -74415,7 +80216,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::to_dense_backward(Tensor grad, Tensor input) -> Tensor + schema_string: aten::to_dense_backward(Tensor grad, Tensor input, bool? 
masked_grad=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -74427,7 +80228,13 @@ is_nullable: false name: input type: const at::Tensor & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: masked_grad + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -74439,6 +80246,12 @@ is_nullable: false name: input type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: masked_grad + type: ::std::optional method_of: - Type - namespace @@ -75355,17 +81168,17 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: true + abstract: false device_guard: true with_gil: false deprecated: false - has_math_kernel: false -- name: to_sparse - operator_name: to_sparse - overload_name: '' + has_math_kernel: true +- name: _to_sparse + operator_name: _to_sparse + overload_name: sparse_dim manual_kernel_registration: false category_override: '' - schema_string: aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor + schema_string: aten::_to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -75373,43 +81186,59 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt - dynamic_type: at::Layout - is_nullable: true - kwarg_only: true - name: layout - type: c10::optional + dynamic_type: int64_t + is_nullable: false + name: sparse_dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: - annotation: null - default: c10::nullopt - dynamic_type: at::IntArrayRef - is_nullable: true - kwarg_only: true - name: blocksize - size: 2 - type: at::OptionalIntArrayRef + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & - annotation: null - default: c10::nullopt dynamic_type: int64_t - is_nullable: true - kwarg_only: true - name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::OptionalIntArrayRef, c10::optional) - schema_order_arguments: + is_nullable: false + name: sparse_dim + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: to_sparse + operator_name: to_sparse + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? 
dense_dim=None) -> Tensor + arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true kwarg_only: true @@ -75417,12 +81246,120 @@ size: 2 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dense_dim - type: c10::optional + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, at::OptionalIntArrayRef, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + kwarg_only: true + name: blocksize + size: 2 + type: at::OptionalIntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: dense_dim + type: ::std::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _to_sparse + operator_name: _to_sparse + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? 
dense_dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + kwarg_only: true + name: blocksize + size: 2 + type: at::OptionalIntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: dense_dim + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, at::OptionalIntArrayRef, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + kwarg_only: true + name: blocksize + size: 2 + type: at::OptionalIntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: dense_dim + type: ::std::optional method_of: - Type - Tensor @@ -75452,12 +81389,59 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _to_sparse_csr + operator_name: _to_sparse_csr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_sparse_csr(Tensor self, int? 
dense_dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -75465,11 +81449,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -75499,12 +81483,59 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _to_sparse_csc + operator_name: _to_sparse_csc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_sparse_csc(Tensor self, int? 
dense_dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -75512,11 +81543,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -75552,12 +81583,12 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -75571,11 +81602,70 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional + type: ::std::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _to_sparse_bsr + operator_name: _to_sparse_bsr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_sparse_bsr(Tensor self, int[2] blocksize, int? 
dense_dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: blocksize + size: 2 + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: blocksize + size: 2 + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional method_of: - Type - Tensor @@ -75611,12 +81701,12 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -75630,11 +81720,11 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -75646,6 +81736,103 @@ type: at::Tensor inplace: false is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _to_sparse_bsc + operator_name: _to_sparse_bsc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_sparse_bsc(Tensor self, int[2] blocksize, int? 
dense_dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: blocksize + size: 2 + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: blocksize + size: 2 + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _to_sparse_semi_structured + operator_name: _to_sparse_semi_structured + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false abstract: true device_guard: true with_gil: false @@ -75664,12 +81851,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -75677,11 +81864,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -75703,7 +81890,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None) -> Tensor + schema_string: aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? 
input_size=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -75738,7 +81925,7 @@ name: groups type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: input_size @@ -75778,7 +81965,7 @@ name: groups type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: input_size @@ -75804,7 +81991,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor + schema_string: aten::mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -75838,7 +82025,13 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: input_size + type: at::OptionalIntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::OptionalIntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -75872,6 +82065,12 @@ is_nullable: false name: groups type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: input_size + type: at::OptionalIntArrayRef method_of: - Type - namespace @@ -78328,13 +84527,13 @@ name: non_blocking type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -78342,33 +84541,33 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -78377,12 +84576,12 @@ name: non_blocking type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -78433,13 +84632,13 @@ name: copy 
type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, bool, bool, ::std::optional) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -78447,33 +84646,33 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -78489,12 +84688,12 @@ name: copy type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -78546,12 +84745,12 @@ name: copy type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Device, at::ScalarType, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Device, at::ScalarType, bool, bool, ::std::optional) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -78581,11 +84780,11 @@ name: copy type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -78632,12 +84831,12 @@ name: copy type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::ScalarType, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::ScalarType, bool, bool, ::std::optional) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -78662,11 +84861,11 @@ name: copy type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -78713,12 +84912,12 @@ name: copy type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const 
at::Tensor &, const at::Tensor &, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, ::std::optional) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -78743,11 +84942,11 @@ name: copy type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -79160,12 +85359,12 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::can_cast(ScalarType from, ScalarType to) -> bool + schema_string: aten::can_cast(ScalarType from_, ScalarType to) -> bool arguments: - annotation: null dynamic_type: at::ScalarType is_nullable: false - name: from + name: from_ type: at::ScalarType - annotation: null dynamic_type: at::ScalarType @@ -79177,7 +85376,7 @@ - annotation: null dynamic_type: at::ScalarType is_nullable: false - name: from + name: from_ type: at::ScalarType - annotation: null dynamic_type: at::ScalarType @@ -79415,23 +85614,23 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[]) + schema_string: aten::lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[]) arguments: - annotation: null dynamic_type: at::Tensor - is_nullable: false + is_nullable: true name: grad_y - type: const at::Tensor & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -79492,23 +85691,23 @@ is_nullable: false name: batch_first type: bool - schema_order_cpp_signature: ::std::tuple,::std::vector> (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool) + schema_order_cpp_signature: ::std::tuple,::std::vector> (const ::std::optional &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor - is_nullable: false + is_nullable: true name: grad_y - type: const at::Tensor & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ 
-79618,14 +85817,14 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -79647,13 +85846,13 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -79687,12 +85886,12 @@ dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -79713,18 +85912,18 @@ is_nullable: false name: has_bias type: bool - schema_order_cpp_signature: ::std::tuple (const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool) + schema_order_cpp_signature: ::std::tuple (const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -79778,12 +85977,12 @@ dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -79804,18 +86003,18 @@ is_nullable: false name: has_bias type: bool - schema_order_cpp_signature: ::std::tuple (const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool) + schema_order_cpp_signature: ::std::tuple (const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -79875,12 +86074,12 @@ dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ 
-79895,12 +86094,12 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -79911,18 +86110,18 @@ is_nullable: false name: cy type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -79937,12 +86136,12 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -80008,14 +86207,14 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -80037,13 +86236,13 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -80161,13 +86360,13 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -80193,12 +86392,12 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & 
+ type: const ::std::optional & method_of: - Type - namespace @@ -81209,14 +87408,14 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -81243,13 +87442,13 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -81301,14 +87500,14 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -81335,13 +87534,13 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -81390,14 +87589,14 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -81424,13 +87623,13 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -81479,14 +87678,14 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + type: 
const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -81513,13 +87712,13 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -83162,6 +89361,61 @@ with_gil: false deprecated: false has_math_kernel: false +- name: masked_scatter_backward + operator_name: masked_scatter_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sizes + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sizes + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _masked_softmax operator_name: _masked_softmax overload_name: '' @@ -83180,18 +89434,18 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -83204,17 +89458,17 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -83254,12 +89508,12 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional) + type: 
::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -83277,11 +89531,11 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -86648,11 +92902,11 @@ type: at::Tensor & inplace: true is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: bitwise_and_ operator_name: bitwise_and_ overload_name: Tensor @@ -87039,11 +93293,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: bitwise_or operator_name: bitwise_or overload_name: Scalar_Tensor @@ -87175,11 +93429,11 @@ type: at::Tensor & inplace: true is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: bitwise_or_ operator_name: bitwise_or_ overload_name: Tensor @@ -87566,11 +93820,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: bitwise_xor operator_name: bitwise_xor overload_name: Scalar_Tensor @@ -87702,11 +93956,11 @@ type: at::Tensor & inplace: true is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: bitwise_xor_ operator_name: bitwise_xor_ overload_name: Tensor @@ -89512,15 +95766,15 @@ dynamic_type: int64_t is_nullable: true name: to - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, ::std::optional, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -89536,14 +95790,14 @@ dynamic_type: int64_t is_nullable: true name: to - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -89578,13 +95832,13 @@ name: to type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -89597,12 +95851,12 @@ name: to type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -89632,13 +95886,13 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -89646,12 +95900,12 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -89693,13 +95947,13 @@ name: to type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -89719,12 +95973,12 @@ name: to type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -89766,13 +96020,13 @@ name: sigma type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -89792,12 +96046,12 @@ name: sigma type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -89839,13 +96093,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -89865,12 +96119,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -89906,13 +96160,13 @@ name: lambd type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -89926,12 +96180,12 @@ name: lambd type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -89966,13 +96220,13 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -89985,12 +96239,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -90141,12 +96395,12 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -90159,11 +96413,11 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -90205,12 +96459,12 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -90223,11 +96477,11 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -90493,7 +96747,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -90517,28 +96771,28 @@ is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -90585,7 +96839,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -90609,28 +96863,28 @@ is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -94068,12 +100322,12 @@ name: indices type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -94086,11 
+100340,11 @@
     name: indices
     type: const at::Tensor &
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: int64_t
     is_nullable: true
     name: dim
-    type: c10::optional<int64_t>
+    type: ::std::optional<int64_t>
   - allocate: true
     annotation: a!
     dynamic_type: at::Tensor
@@ -94132,12 +100386,12 @@
     name: indices
     type: const at::Tensor &
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: int64_t
     is_nullable: true
     name: dim
-    type: c10::optional<int64_t>
-  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<int64_t>)
+    type: ::std::optional<int64_t>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional<int64_t>)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -94150,11 +100404,11 @@
     name: indices
     type: const at::Tensor &
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: int64_t
     is_nullable: true
     name: dim
-    type: c10::optional<int64_t>
+    type: ::std::optional<int64_t>
   method_of:
   - Type
   - Tensor
@@ -94732,6 +100986,143 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
+- name: nonzero_static_out
+  operator_name: nonzero_static
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    kwarg_only: true
+    name: size
+    type: int64_t
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    kwarg_only: true
+    name: fill_value
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    kwarg_only: true
+    name: size
+    type: int64_t
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    kwarg_only: true
+    name: fill_value
+    type: int64_t
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: nonzero_static
+  operator_name: nonzero_static
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    kwarg_only: true
+    name: size
+    type: int64_t
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    kwarg_only: true
+    name: fill_value
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    kwarg_only: true
+    name: size
+    type: int64_t
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    kwarg_only: true
+    name: fill_value
+    type: int64_t
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
 - name: nonzero_numpy
   operator_name: nonzero_numpy
   overload_name: ''
@@ -95716,7 +102107,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: weight
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: at::Reduction::Mean
     dynamic_type: int64_t
@@ -95735,7 +102126,7 @@
     is_nullable: false
     name: label_smoothing
     type: double
-  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, int64_t, double)
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, int64_t, int64_t, double)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -95752,7 +102143,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: weight
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: at::Reduction::Mean
     dynamic_type: int64_t
@@ -96235,7 +102626,7 @@
   overload_name: ''
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::linalg_vander(Tensor x, *, int? N=None) -> Tensor
+  schema_string: aten::linalg_vander(Tensor x, *, SymInt?
N=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -96243,13 +102634,13 @@ name: x type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: N - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -96257,12 +102648,12 @@ name: x type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: N - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -98044,13 +104435,13 @@ name: replacement type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -98069,12 +104460,12 @@ name: replacement type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -98122,13 +104513,13 @@ name: replacement type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -98147,12 +104538,12 @@ name: replacement type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -99775,7 +106166,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -99783,7 +106174,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, bool, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, bool, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -99801,7 +106192,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -99869,7 +106260,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -99877,7 +106268,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const 
at::Tensor &, const c10::optional &, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -99895,7 +106286,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -99960,19 +106351,19 @@ name: bins type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -99980,7 +106371,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, c10::optional>, const c10::optional &, bool, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, ::std::optional>, const ::std::optional &, bool, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -99994,19 +106385,19 @@ name: bins type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100070,19 +106461,19 @@ name: bins type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100090,7 +106481,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, c10::optional>, const c10::optional &, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, ::std::optional>, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -100104,19 +106495,19 @@ name: bins type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100164,19 +106555,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ 
-100184,7 +106575,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::IntArrayRef, c10::optional>, const c10::optional &, bool) + schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::IntArrayRef, ::std::optional>, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -100197,19 +106588,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100251,19 +106642,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100271,7 +106662,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional>, const c10::optional &, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional>, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -100284,19 +106675,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100343,7 +106734,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100351,7 +106742,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::TensorList, const c10::optional &, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::TensorList, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -100369,7 +106760,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100411,24 +106802,24 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool is_nullable: false name: density type: bool - schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::IntArrayRef, c10::optional>, const c10::optional &, bool) + 
schema_order_cpp_signature: ::std::tuple<at::Tensor,::std::vector<at::Tensor>> (const at::Tensor &, at::IntArrayRef, ::std::optional<::std::array<double,2>>, const ::std::optional<at::Tensor> &, bool)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -100441,17 +106832,17 @@
     name: bins
     type: at::IntArrayRef
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: at::ArrayRef<double>
     is_nullable: true
     name: range
-    type: c10::optional<::std::array<double,2>>
+    type: ::std::optional<::std::array<double,2>>
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     name: weight
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: false
     dynamic_type: bool
@@ -100497,24 +106888,24 @@
     name: bins
     type: int64_t
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: at::ArrayRef<double>
     is_nullable: true
     name: range
-    type: c10::optional<::std::array<double,2>>
+    type: ::std::optional<::std::array<double,2>>
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     name: weight
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: false
     dynamic_type: bool
     is_nullable: false
     name: density
     type: bool
-  schema_order_cpp_signature: ::std::tuple<at::Tensor,::std::vector<at::Tensor>> (const at::Tensor &, int64_t, c10::optional<::std::array<double,2>>, const c10::optional<at::Tensor> &, bool)
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,::std::vector<at::Tensor>> (const at::Tensor &, int64_t, ::std::optional<::std::array<double,2>>, const ::std::optional<at::Tensor> &, bool)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -100527,17 +106918,17 @@
     name: bins
     type: int64_t
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: at::ArrayRef<double>
     is_nullable: true
     name: range
-    type: c10::optional<::std::array<double,2>>
+    type: ::std::optional<::std::array<double,2>>
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     name: weight
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: false
     dynamic_type: bool
@@ -100583,24 +106974,24 @@
     name: bins
     type: at::TensorList
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: at::ArrayRef<double>
     is_nullable: true
     name: range
-    type: c10::optional<::std::array<double,2>>
+    type: ::std::optional<::std::array<double,2>>
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     name: weight
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: false
     dynamic_type: bool
     is_nullable: false
     name: density
     type: bool
-  schema_order_cpp_signature: ::std::tuple<at::Tensor,::std::vector<at::Tensor>> (const at::Tensor &, at::TensorList, c10::optional<::std::array<double,2>>, const c10::optional<at::Tensor> &, bool)
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,::std::vector<at::Tensor>> (const at::Tensor &, at::TensorList, ::std::optional<::std::array<double,2>>, const ::std::optional<at::Tensor> &, bool)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -100613,17 +107004,17 @@
     name: bins
     type: at::TensorList
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: at::ArrayRef<double>
     is_nullable: true
     name: range
-    type: c10::optional<::std::array<double,2>>
+    type: ::std::optional<::std::array<double,2>>
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     name: weight
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: false
     dynamic_type: bool
@@ -101932,6 +108323,55 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
+- name: min_out
+  operator_name: min
+  overload_name: unary_out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: fmin operator_name: fmin overload_name: '' @@ -102665,11 +109105,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -102683,7 +109123,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional, bool, c10::string_view) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional, bool, c10::string_view) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -102696,11 +109136,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -102756,11 +109196,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -102774,7 +109214,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, bool, c10::string_view, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, bool, c10::string_view, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -102787,11 +109227,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -102846,11 +109286,11 @@ name: q type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -102864,7 +109304,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional, bool, c10::string_view) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, ::std::optional, bool, c10::string_view) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -102877,11 +109317,11 @@ name: q type: double - annotation: null - default: 
c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -102937,11 +109377,11 @@ name: q type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -102955,7 +109395,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional, bool, c10::string_view, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, ::std::optional, bool, c10::string_view, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -102968,11 +109408,11 @@ name: q type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103027,11 +109467,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103045,7 +109485,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional, bool, c10::string_view) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional, bool, c10::string_view) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -103058,11 +109498,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103118,11 +109558,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103136,7 +109576,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, bool, c10::string_view, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, bool, c10::string_view, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -103149,11 +109589,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103208,11 +109648,11 @@ name: q type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103226,7 +109666,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional, bool, c10::string_view) + 
schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, ::std::optional, bool, c10::string_view) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -103239,11 +109679,11 @@ name: q type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103299,11 +109739,11 @@ name: q type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103317,7 +109757,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional, bool, c10::string_view, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, ::std::optional, bool, c10::string_view, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -103330,11 +109770,11 @@ name: q type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103500,7 +109940,7 @@ is_nullable: true kwarg_only: true name: stable - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -103515,7 +109955,7 @@ kwarg_only: true name: descending type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, int64_t, bool, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, ::std::optional, int64_t, bool, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -103527,7 +109967,7 @@ is_nullable: true kwarg_only: true name: stable - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -103661,7 +110101,7 @@ is_nullable: true kwarg_only: true name: stable - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -103676,7 +110116,7 @@ kwarg_only: true name: descending type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, int64_t, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, ::std::optional, int64_t, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -103688,7 +110128,7 @@ is_nullable: true kwarg_only: true name: stable - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -103852,7 +110292,7 @@ is_nullable: true kwarg_only: true name: stable - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::Dimname is_nullable: false @@ -103866,7 +110306,7 @@ kwarg_only: true name: descending type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, at::Dimname, bool, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, ::std::optional, at::Dimname, bool, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -103878,7 +110318,7 @@ is_nullable: true kwarg_only: true name: stable - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::Dimname is_nullable: false @@ 
-104009,7 +110449,7 @@
     is_nullable: true
     kwarg_only: true
     name: stable
-    type: c10::optional<bool>
+    type: ::std::optional<bool>
   - annotation: null
     dynamic_type: at::Dimname
     is_nullable: false
@@ -104023,7 +110463,7 @@
     kwarg_only: true
     name: descending
     type: bool
-  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, c10::optional<bool>, at::Dimname, bool)
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, ::std::optional<bool>, at::Dimname, bool)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -104035,7 +110475,7 @@
     is_nullable: true
     kwarg_only: true
     name: stable
-    type: c10::optional<bool>
+    type: ::std::optional<bool>
   - annotation: null
     dynamic_type: at::Dimname
     is_nullable: false
@@ -104287,11 +110727,100 @@
     type: at::Tensor
   inplace: false
   is_factory_method: false
-  abstract: true
+  abstract: false
   device_guard: true
   with_gil: false
   deprecated: false
-  has_math_kernel: false
+  has_math_kernel: true
+- name: argsort_out
+  operator_name: argsort
+  overload_name: stable_out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    kwarg_only: true
+    name: stable
+    type: bool
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    kwarg_only: true
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    kwarg_only: true
+    name: descending
+    type: bool
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, int64_t, bool, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    kwarg_only: true
+    name: stable
+    type: bool
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    kwarg_only: true
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    kwarg_only: true
+    name: descending
+    type: bool
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
 - name: argsort
   operator_name: argsort
   overload_name: dimname
@@ -104355,7 +110884,7 @@
   overload_name: values
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+  schema_string: aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
   arguments:
   - allocate: true
     annotation: a!
@@ -104473,7 +111002,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) + schema_string: aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) arguments: - annotation: null dynamic_type: at::Tensor @@ -105954,13 +112483,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -105980,12 +112509,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -106027,13 +112556,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -106053,12 +112582,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -106101,13 +112630,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -106121,12 +112650,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -106169,13 +112698,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -106189,12 +112718,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -106236,13 +112765,13 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (double, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (double, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: double @@ -106255,12 +112784,12 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -106302,13 +112831,13 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (double, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (double, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: double @@ -106321,12 +112850,12 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -106368,13 +112897,13 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -106387,12 +112916,12 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -106434,13 +112963,13 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -106453,12 +112982,12 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -106498,12 +113027,12 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -106511,7 +113040,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (double, double, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (double, double, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: double @@ -106529,40 +113058,40 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -106609,13 +113138,13 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (double, double, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (double, double, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: double @@ -106633,12 +113162,12 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
     dynamic_type: at::Tensor
@@ -106922,12 +113451,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_sub
-  operator_name: _foreach_sub
-  overload_name: Scalar
+- name: _foreach_add
+  operator_name: _foreach_add
+  overload_name: List
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+  schema_string: aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
   arguments:
   - annotation: null
     dynamic_type: at::TensorList
@@ -106935,11 +113464,18 @@
     name: self
     type: at::TensorList
   - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: other
+    type: at::TensorList
+  - annotation: null
+    default: 1
     dynamic_type: const at::Scalar &
     is_nullable: false
-    name: scalar
+    kwarg_only: true
+    name: alpha
     type: const at::Scalar &
-  schema_order_cpp_signature: ::std::vector<at::Tensor> (at::TensorList, const at::Scalar &)
+  schema_order_cpp_signature: ::std::vector<at::Tensor> (at::TensorList, at::TensorList, const at::Scalar &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
@@ -106947,9 +113483,16 @@
     name: self
     type: at::TensorList
   - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: other
+    type: at::TensorList
+  - annotation: null
+    default: 1
    dynamic_type: const at::Scalar &
     is_nullable: false
-    name: scalar
+    kwarg_only: true
+    name: alpha
     type: const at::Scalar &
   method_of:
   - Type
@@ -106967,12 +113510,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_sub_
-  operator_name: _foreach_sub_
-  overload_name: Scalar
+- name: _foreach_add_
+  operator_name: _foreach_add_
+  overload_name: List
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+  schema_string: aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
   arguments:
   - annotation: a!
     dynamic_type: at::TensorList
@@ -106980,11 +113523,18 @@
     name: self
     type: at::TensorList
   - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: other
+    type: at::TensorList
+  - annotation: null
+    default: 1
     dynamic_type: const at::Scalar &
     is_nullable: false
-    name: scalar
+    kwarg_only: true
+    name: alpha
     type: const at::Scalar &
-  schema_order_cpp_signature: void (at::TensorList, const at::Scalar &)
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &)
   schema_order_arguments:
   - annotation: a!
dynamic_type: at::TensorList @@ -106992,9 +113542,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & method_of: - Type @@ -107009,12 +113566,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_mul - operator_name: _foreach_mul - overload_name: Scalar +- name: _foreach_add + operator_name: _foreach_add + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + schema_string: aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107022,11 +113579,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & - schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107034,10 +113591,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -107054,12 +113611,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_mul_ - operator_name: _foreach_mul_ - overload_name: Scalar +- name: _foreach_add_ + operator_name: _foreach_add_ + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + schema_string: aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107067,11 +113624,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -107079,10 +113636,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -107096,12 +113653,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_div - operator_name: _foreach_div - overload_name: Scalar +- name: _foreach_add + operator_name: _foreach_add + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + schema_string: aten::_foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107109,11 +113666,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Tensor &, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107121,9 +113685,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & method_of: - Type @@ -107141,12 +113712,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_div_ - operator_name: _foreach_div_ - overload_name: Scalar +- name: _foreach_add_ + operator_name: _foreach_add_ + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + schema_string: aten::_foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107154,11 +113725,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) + schema_order_cpp_signature: void (at::TensorList, const at::Tensor &, const at::Scalar &) schema_order_arguments: - annotation: a! 
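Per the schema strings above, `_foreach_add` now carries three overloads beyond the original Scalar form: List (tensor-by-tensor, with a kwarg-only `alpha` defaulting to 1), ScalarList (one scalar per tensor), and Tensor (a single 0-dim tensor applied to every list element). A hedged sketch of what these schemas accept (list contents illustrative):

    #include <ATen/ATen.h>
    #include <vector>

    int main() {
      std::vector<at::Tensor> xs = {at::ones({2}), at::ones({3})};
      std::vector<at::Tensor> ys = {at::ones({2}), at::ones({3})};

      // List overload: a[i] = xs[i] + alpha * ys[i]
      auto a = at::_foreach_add(xs, ys, /*alpha=*/2);
      // ScalarList overload: b[i] = xs[i] + scalars[i]
      auto b = at::_foreach_add(xs, std::vector<at::Scalar>{1, 2});
      // Tensor overload: a single 0-dim tensor added to every element
      auto c = at::_foreach_add(xs, at::scalar_tensor(3.0, at::kFloat));
      return 0;
    }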
dynamic_type: at::TensorList @@ -107166,9 +113744,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & method_of: - Type @@ -107183,12 +113768,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_min - operator_name: _foreach_clamp_min +- name: _foreach_sub + operator_name: _foreach_sub overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + schema_string: aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107228,12 +113813,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_min_ - operator_name: _foreach_clamp_min_ +- name: _foreach_sub_ + operator_name: _foreach_sub_ overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + schema_string: aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107270,12 +113855,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max - operator_name: _foreach_clamp_max - overload_name: Scalar +- name: _foreach_sub + operator_name: _foreach_sub + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + schema_string: aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107283,11 +113868,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107295,9 +113887,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & method_of: - Type @@ -107315,12 +113914,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max_ - operator_name: _foreach_clamp_max_ - overload_name: Scalar +- name: _foreach_sub_ + operator_name: _foreach_sub_ + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + schema_string: aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -107328,11 +113927,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) + schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! dynamic_type: at::TensorList @@ -107340,9 +113946,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & method_of: - Type @@ -107357,12 +113970,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_maximum - operator_name: _foreach_maximum - overload_name: Scalar +- name: _foreach_sub + operator_name: _foreach_sub + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + schema_string: aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107370,11 +113983,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & - schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107382,10 +113995,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -107402,12 +114015,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_maximum_ - operator_name: _foreach_maximum_ - overload_name: Scalar +- name: _foreach_sub_ + operator_name: _foreach_sub_ + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + schema_string: aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107415,11 +114028,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: a! 
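`_foreach_sub` follows the same pattern in the hunks above: its List overload also takes the kwarg-only `alpha`, and a ScalarList overload supplies one scalar per tensor. Sketch under the same assumptions (illustrative data):

    #include <ATen/ATen.h>
    #include <vector>

    int main() {
      std::vector<at::Tensor> xs = {at::ones({2}), at::ones({3})};
      std::vector<at::Tensor> ys = {at::ones({2}), at::ones({3})};

      // d[i] = xs[i] - alpha * ys[i]
      auto d = at::_foreach_sub(xs, ys, /*alpha=*/2);
      // In-place variant writes back into xs: xs[i] -= scalars[i]
      at::_foreach_sub_(xs, std::vector<at::Scalar>{0.5, 0.25});
      return 0;
    }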
dynamic_type: at::TensorList @@ -107427,10 +114040,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -107444,12 +114057,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum - operator_name: _foreach_minimum +- name: _foreach_mul + operator_name: _foreach_mul overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + schema_string: aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107489,12 +114102,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum_ - operator_name: _foreach_minimum_ +- name: _foreach_mul_ + operator_name: _foreach_mul_ overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + schema_string: aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107531,12 +114144,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_add - operator_name: _foreach_add +- name: _foreach_mul + operator_name: _foreach_mul overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] + schema_string: aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107548,14 +114161,7 @@ is_nullable: false name: other type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & - is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & - schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, const at::Scalar &) + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107567,13 +114173,6 @@ is_nullable: false name: other type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & - is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & method_of: - Type - namespace @@ -107590,12 +114189,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_add_ - operator_name: _foreach_add_ +- name: _foreach_mul_ + operator_name: _foreach_mul_ overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () + schema_string: aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107607,14 +114206,7 @@ is_nullable: false name: other type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & - is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &) + schema_order_cpp_signature: void (at::TensorList, at::TensorList) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -107626,13 +114218,6 @@ is_nullable: false name: other type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & - is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & method_of: - Type - namespace @@ -107646,12 +114231,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sub - operator_name: _foreach_sub - overload_name: List +- name: _foreach_mul + operator_name: _foreach_mul + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] + schema_string: aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107659,18 +114244,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: other - type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & - schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, const at::Scalar &) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107678,17 +114256,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: other - type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -107705,12 +114276,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sub_ - operator_name: _foreach_sub_ - overload_name: List +- name: _foreach_mul_ + operator_name: _foreach_mul_ + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () + schema_string: aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107718,18 +114289,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: other - type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -107737,17 +114301,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: other - type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -107763,10 +114320,10 @@ has_math_kernel: false - name: _foreach_mul operator_name: _foreach_mul - overload_name: List + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[] + schema_string: aten::_foreach_mul.Tensor(Tensor[] self, Tensor other) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107774,11 +114331,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + type: const at::Tensor & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107786,10 +114343,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList + type: const at::Tensor & method_of: - Type - namespace @@ -107808,10 +114365,10 @@ has_math_kernel: false - name: _foreach_mul_ operator_name: _foreach_mul_ - overload_name: List + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> () + schema_string: aten::_foreach_mul_.Tensor(Tensor(a!)[] self, Tensor other) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107819,11 +114376,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, const at::Tensor &) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -107831,10 +114388,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList + type: const at::Tensor & method_of: - Type - namespace @@ -107850,10 +114407,10 @@ has_math_kernel: false - name: _foreach_div operator_name: _foreach_div - overload_name: List + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[] + schema_string: aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107861,11 +114418,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107873,10 +114430,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -107895,10 +114452,10 @@ has_math_kernel: false - name: _foreach_div_ operator_name: _foreach_div_ - overload_name: List + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> () + schema_string: aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107906,11 +114463,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! 
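The hunks here reorder `_foreach_mul` and `_foreach_div` into the full Scalar / List / ScalarList / Tensor set; the Tensor overload broadcasts one scalar tensor against every list element (the shape fused optimizers use for tensor learning rates). Sketch, same assumptions as above:

    #include <ATen/ATen.h>
    #include <vector>

    int main() {
      std::vector<at::Tensor> xs = {at::ones({2}), at::ones({3})};

      // Tensor overload, in place: xs[i] *= 2 (a single 0-dim scalar tensor)
      at::_foreach_mul_(xs, at::scalar_tensor(2.0, at::kFloat));
      // Scalar overload: ys[i] = xs[i] / 4
      auto ys = at::_foreach_div(xs, 4);
      return 0;
    }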
dynamic_type: at::TensorList @@ -107918,10 +114475,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -107935,12 +114492,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_min - operator_name: _foreach_clamp_min +- name: _foreach_div + operator_name: _foreach_div overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[] + schema_string: aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107980,12 +114537,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_min_ - operator_name: _foreach_clamp_min_ +- name: _foreach_div_ + operator_name: _foreach_div_ overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> () + schema_string: aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108022,12 +114579,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max - operator_name: _foreach_clamp_max - overload_name: List +- name: _foreach_div + operator_name: _foreach_div + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[] + schema_string: aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108035,11 +114592,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::ArrayRef is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108047,10 +114604,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::ArrayRef is_nullable: false - name: other - type: at::TensorList + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -108067,12 +114624,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max_ - operator_name: _foreach_clamp_max_ - overload_name: List +- name: _foreach_div_ + operator_name: _foreach_div_ + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> () + schema_string: aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108080,11 +114637,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::ArrayRef is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -108092,10 +114649,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::ArrayRef is_nullable: false - name: other - type: at::TensorList + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -108109,12 +114666,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_maximum - operator_name: _foreach_maximum - overload_name: List +- name: _foreach_div + operator_name: _foreach_div + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[] + schema_string: aten::_foreach_div.Tensor(Tensor[] self, Tensor other) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108122,11 +114679,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + type: const at::Tensor & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108134,10 +114691,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList + type: const at::Tensor & method_of: - Type - namespace @@ -108154,12 +114711,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_maximum_ - operator_name: _foreach_maximum_ - overload_name: List +- name: _foreach_div_ + operator_name: _foreach_div_ + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> () + schema_string: aten::_foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108167,11 +114724,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, const at::Tensor &) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -108179,10 +114736,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList + type: const at::Tensor & method_of: - Type - namespace @@ -108196,12 +114753,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum - operator_name: _foreach_minimum - overload_name: List +- name: _foreach_clamp_max + operator_name: _foreach_clamp_max + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[] + schema_string: aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108209,11 +114766,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108221,10 +114778,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -108241,12 +114798,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum_ - operator_name: _foreach_minimum_ - overload_name: List +- name: _foreach_clamp_max_ + operator_name: _foreach_clamp_max_ + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> () + schema_string: aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108254,11 +114811,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -108266,10 +114823,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -108283,12 +114840,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_add - operator_name: _foreach_add - overload_name: ScalarList +- name: _foreach_clamp_max + operator_name: _foreach_clamp_max + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + schema_string: aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108296,11 +114853,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) + name: other + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108308,10 +114865,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList method_of: - Type - namespace @@ -108328,12 +114885,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_add_ - operator_name: _foreach_add_ - overload_name: ScalarList +- name: _foreach_clamp_max_ + operator_name: _foreach_clamp_max_ + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + schema_string: aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108341,11 +114898,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -108353,10 +114910,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList method_of: - Type - namespace @@ -108370,12 +114927,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sub - operator_name: _foreach_sub +- name: _foreach_clamp_max + operator_name: _foreach_clamp_max overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + schema_string: aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108415,12 +114972,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sub_ - operator_name: _foreach_sub_ +- name: _foreach_clamp_max_ + operator_name: _foreach_clamp_max_ overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + schema_string: aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108457,12 +115014,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_div - operator_name: _foreach_div - overload_name: ScalarList +- name: _foreach_clamp_min + operator_name: _foreach_clamp_min + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + schema_string: aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108470,11 +115027,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108482,10 +115039,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -108502,12 +115059,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_div_ - operator_name: _foreach_div_ - overload_name: ScalarList +- name: _foreach_clamp_min_ + operator_name: _foreach_clamp_min_ + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + schema_string: aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -108515,11 +115072,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! dynamic_type: at::TensorList @@ -108527,10 +115084,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -108544,12 +115101,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_mul - operator_name: _foreach_mul - overload_name: ScalarList +- name: _foreach_clamp_min + operator_name: _foreach_clamp_min + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + schema_string: aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108557,11 +115114,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) + name: other + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108569,10 +115126,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList method_of: - Type - namespace @@ -108589,12 +115146,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_mul_ - operator_name: _foreach_mul_ - overload_name: ScalarList +- name: _foreach_clamp_min_ + operator_name: _foreach_clamp_min_ + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + schema_string: aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108602,11 +115159,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -108614,10 +115171,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList method_of: - Type - namespace @@ -108718,12 +115275,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max - operator_name: _foreach_clamp_max - overload_name: ScalarList +- name: _foreach_maximum + operator_name: _foreach_maximum + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + schema_string: aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108731,11 +115288,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108743,10 +115300,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -108763,12 +115320,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max_ - operator_name: _foreach_clamp_max_ - overload_name: ScalarList +- name: _foreach_maximum_ + operator_name: _foreach_maximum_ + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + schema_string: aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108776,11 +115333,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! 
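The clamp hunks above give `_foreach_clamp_max` / `_foreach_clamp_min` the Scalar, List, and ScalarList overloads; semantically, clamp_max is an elementwise minimum against the bound and clamp_min an elementwise maximum. Sketch (illustrative inputs):

    #include <ATen/ATen.h>
    #include <vector>

    int main() {
      std::vector<at::Tensor> xs = {at::randn({2}), at::randn({3})};

      auto capped  = at::_foreach_clamp_max(xs, 1.0);  // min(x, 1.0) per element
      auto floored = at::_foreach_clamp_min(xs, 0.0);  // max(x, 0.0) per element
      return 0;
    }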
dynamic_type: at::TensorList @@ -108788,10 +115345,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -108807,10 +115364,10 @@ has_math_kernel: false - name: _foreach_maximum operator_name: _foreach_maximum - overload_name: ScalarList + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + schema_string: aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108818,11 +115375,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) + name: other + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108830,10 +115387,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList method_of: - Type - namespace @@ -108852,10 +115409,10 @@ has_math_kernel: false - name: _foreach_maximum_ operator_name: _foreach_maximum_ - overload_name: ScalarList + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + schema_string: aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108863,11 +115420,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -108875,10 +115432,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList method_of: - Type - namespace @@ -108892,12 +115449,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum - operator_name: _foreach_minimum +- name: _foreach_maximum + operator_name: _foreach_maximum overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + schema_string: aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108937,12 +115494,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum_ - operator_name: _foreach_minimum_ +- name: _foreach_maximum_ + operator_name: _foreach_maximum_ overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + schema_string: aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108979,25 +115536,35 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_exp - operator_name: _foreach_exp - overload_name: '' +- name: _foreach_minimum + operator_name: _foreach_minimum + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_exp(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList) + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -109014,25 +115581,35 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_zero_ - operator_name: _foreach_zero_ - overload_name: '' +- name: _foreach_minimum_ + operator_name: _foreach_minimum_ + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_zero_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () arguments: - annotation: a! dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList) + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -109046,56 +115623,34 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_exp_ - operator_name: _foreach_exp_ - overload_name: '' +- name: _foreach_minimum + operator_name: _foreach_minimum + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_exp_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[] arguments: - - annotation: a! + - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList) - schema_order_arguments: - - annotation: a! + - annotation: null dynamic_type: at::TensorList is_nullable: false - name: self + name: other type: at::TensorList - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: [] - inplace: true - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: _foreach_sqrt - operator_name: _foreach_sqrt - overload_name: '' - manual_kernel_registration: false - category_override: '' - schema_string: aten::_foreach_sqrt(Tensor[] self) -> Tensor[] - arguments: + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList) - schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: self + name: other type: at::TensorList method_of: - Type @@ -109113,25 +115668,35 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sqrt_ - operator_name: _foreach_sqrt_ - overload_name: '' +- name: _foreach_minimum_ + operator_name: _foreach_minimum_ + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sqrt_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> () arguments: - annotation: a! dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList method_of: - Type - namespace @@ -109145,25 +115710,35 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_abs - operator_name: _foreach_abs - overload_name: '' +- name: _foreach_minimum + operator_name: _foreach_minimum + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_abs(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList) + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -109180,25 +115755,35 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_abs_ - operator_name: _foreach_abs_ - overload_name: '' +- name: _foreach_minimum_ + operator_name: _foreach_minimum_ + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_abs_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList) + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: a! 
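`_foreach_maximum` / `_foreach_minimum` get the same Scalar / List / ScalarList trio; unlike the clamp ops they are phrased as a binary maximum/minimum between corresponding elements. Sketch (illustrative inputs):

    #include <ATen/ATen.h>
    #include <vector>

    int main() {
      std::vector<at::Tensor> xs = {at::randn({2}), at::randn({3})};
      std::vector<at::Tensor> ys = {at::zeros({2}), at::zeros({3})};

      auto hi = at::_foreach_maximum(xs, ys);                             // List
      auto lo = at::_foreach_minimum(xs, std::vector<at::Scalar>{0, 1}); // ScalarList
      return 0;
    }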
[Auto-generated ATen declarations diff, summarized: the preceding ScalarList overload first picks up its Scalar[] scalars argument (at::ArrayRef<at::Scalar> in the C++ signature). The hunk then replaces the old _foreach_acos, _foreach_asin, _foreach_atan, _foreach_ceil, _foreach_cos, and _foreach_cosh entries (and their in-place variants, all of which reappear later in alphabetical order) with the _foreach_addcdiv and _foreach_addcmul families. Each family declares three overloads, in functional and in-place form:

- Scalar:     aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
- ScalarList: aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
- Tensor:     aten::_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]

_foreach_addcmul is declared identically, and the in-place _foreach_addcdiv_ / _foreach_addcmul_ variants take Tensor(a!)[] self and return (). The C++ signatures use at::TensorList for the tensor lists, at::ArrayRef<at::Scalar> for scalars, const at::Tensor & for the Tensor overload, and ::std::vector<at::Tensor> (functional) or void (in-place) as return types; every entry carries abstract: true and has_math_kernel: false.]
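For orientation, these addcdiv/addcmul schemas are the fused list kernels that optimizer steps build on. A minimal C++ sketch of the call site, assuming a libtorch recent enough to ship these overloads (2.x); the variable names are illustrative and not part of this diff:

#include <torch/torch.h>
#include <vector>

int main() {
  // Illustrative parameter/gradient lists; std::vector<at::Tensor>
  // converts implicitly to the at::TensorList the schemas declare.
  std::vector<at::Tensor> params = {torch::randn({4}), torch::randn({8})};
  std::vector<at::Tensor> grads  = {torch::randn({4}), torch::randn({8})};
  std::vector<at::Tensor> denoms = {torch::rand({4}) + 1e-3, torch::rand({8}) + 1e-3};

  // _foreach_addcdiv_.Scalar: params[i] += value * grads[i] / denoms[i],
  // one fused call across the list instead of a loop of addcdiv_ calls.
  at::_foreach_addcdiv_(params, grads, denoms, /*value=*/-0.01);

  // _foreach_addcmul.Scalar (functional form): returns new tensors,
  // out[i] = params[i] + value * grads[i] * grads[i].
  auto out = at::_foreach_addcmul(params, grads, grads, /*value=*/0.5);
  return 0;
}

The ScalarList and Tensor overloads follow the same pattern, taking the per-tensor scalars as at::ArrayRef<at::Scalar> or as a 1-D tensor instead of a single value.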
[continued: the unary _foreach_ entries are re-emitted in alphabetical order, with _foreach_abs / _foreach_abs_ and _foreach_exp / _foreach_exp_ leading the block, while _foreach_acos, _foreach_asin, _foreach_atan, _foreach_ceil, _foreach_cos, _foreach_cosh, _foreach_erf, _foreach_erfc, _foreach_expm1, and _foreach_floor (each with its in-place variant) move into the slots previously held by _foreach_erf through _foreach_sin_. Every pair keeps the same two schemas: aten::_foreach_<op>(Tensor[] self) -> Tensor[] and aten::_foreach_<op>_(Tensor(a!)[] self) -> ().]
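All of these unary pairs share one calling convention; a short sketch under the same libtorch assumption as above:

#include <torch/torch.h>
#include <vector>

int main() {
  std::vector<at::Tensor> xs = {torch::randn({3}), torch::randn({5})};

  // Functional schema: aten::_foreach_exp(Tensor[] self) -> Tensor[]
  // returns ::std::vector<at::Tensor>, one result per input tensor.
  std::vector<at::Tensor> ys = at::_foreach_exp(xs);

  // In-place schema: aten::_foreach_abs_(Tensor(a!)[] self) -> ()
  // mutates every tensor in the list and returns void.
  at::_foreach_abs_(xs);
  return 0;
}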
[continued: _foreach_frac / _foreach_frac_ move into the old _foreach_sinh slots, and the _foreach_lerp family is introduced where _foreach_round and _foreach_lgamma previously sat:

- aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
- aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
- aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
- aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()

followed by the relocated _foreach_lgamma, _foreach_log, and _foreach_log10 pairs.]
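The lerp Scalar overload is the building block for fused EMA-style weight averaging; a hedged sketch (the ema/latest names are illustrative, not from this diff):

#include <torch/torch.h>
#include <vector>

int main() {
  // Illustrative EMA ("shadow") weights and current model weights.
  std::vector<at::Tensor> ema    = {torch::zeros({4}), torch::zeros({8})};
  std::vector<at::Tensor> latest = {torch::randn({4}), torch::randn({8})};

  // _foreach_lerp_.Scalar: ema[i] = ema[i] + weight * (latest[i] - ema[i]),
  // fused over the whole parameter list.
  at::_foreach_lerp_(ema, latest, /*weight=*/0.001);
  return 0;
}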
[continued: _foreach_log1p and _foreach_log2 take the old _foreach_trunc and in-place addcdiv/addcmul Scalar slots; a new reduction aten::_foreach_max(Tensor[] self) -> Tensor[] is declared; _foreach_neg / _foreach_neg_ relocate; and _foreach_norm.Scalar grows an optional output dtype:

- aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2, ScalarType? dtype=None) -> Tensor[]
  (C++: ::std::optional<at::ScalarType> dtype, defaulting to ::std::nullopt)

The _foreach_pow family is then introduced in place of the old addcdiv/addcmul declarations rewritten earlier in this hunk:

- aten::_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]
- aten::_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]
- aten::_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]
- aten::_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]
- aten::_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()]
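_foreach_norm returning one norm per tensor is what foreach-based gradient-norm clipping is typically built from; a sketch under the same libtorch assumption, leaving the newly added dtype argument at its default:

#include <torch/torch.h>
#include <vector>

int main() {
  std::vector<at::Tensor> grads = {torch::randn({4}), torch::randn({8})};

  // _foreach_norm.Scalar: one 0-dim norm tensor per input
  // (the optional dtype argument added above is not passed here).
  std::vector<at::Tensor> norms = at::_foreach_norm(grads, /*ord=*/2);

  // Reduce the per-tensor norms to a single global gradient norm,
  // which a clipping routine would compare against its threshold.
  at::Tensor total = torch::stack(norms).norm(2);

  // _foreach_pow.Scalar: squares every gradient in one fused call.
  auto squared = at::_foreach_pow(grads, /*exponent=*/2.0);
  return 0;
}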
[continued: the in-place aten::_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> () and aten::_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> () complete the pow family. _foreach_reciprocal, _foreach_round, and _foreach_sigmoid (with their in-place variants) then move into their alphabetical slots, displacing the old _foreach_norm.Scalar and _foreach_lerp declarations rewritten above, and _foreach_sign / _foreach_sign_ are declared with the standard unary schemas, aten::_foreach_sign(Tensor[] self) -> Tensor[] and aten::_foreach_sign_(Tensor(a!)[] self) -> ().]
dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: tensors1 - type: at::TensorList - - annotation: null - dynamic_type: const at::Scalar & - is_nullable: false - name: weight - type: const at::Scalar & method_of: - Type - namespace @@ -111918,72 +118722,34 @@ with_gil: false deprecated: false has_math_kernel: false -- name: bucketize - operator_name: bucketize - overload_name: Tensor +- name: _foreach_sin + operator_name: _foreach_sin + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor + schema_string: aten::_foreach_sin(Tensor[] self) -> Tensor[] arguments: - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: boundaries - type: const at::Tensor & - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: out_int32 - type: bool - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: right - type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool) + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) schema_order_arguments: - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: boundaries - type: const at::Tensor & - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: out_int32 - type: bool - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: right - type: bool + type: at::TensorList method_of: - Type - namespace mode: native python_module: '' returns: - - dynamic_type: at::Tensor + - dynamic_type: at::TensorList name: result - type: at::Tensor + type: ::std::vector inplace: false is_factory_method: false abstract: true @@ -111991,159 +118757,66 @@ with_gil: false deprecated: false has_math_kernel: false -- name: bucketize_out - operator_name: bucketize - overload_name: Tensor_out +- name: _foreach_sin_ + operator_name: _foreach_sin_ + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_foreach_sin_(Tensor(a!)[] self) -> () arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor + - annotation: a! 
+ dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: boundaries - type: const at::Tensor & - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: out_int32 - type: bool - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: right - type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &) + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor + - annotation: a! + dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: boundaries - type: const at::Tensor & - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: out_int32 - type: bool - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: right - type: bool - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & + type: at::TensorList method_of: - Type - namespace mode: native python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false + returns: [] + inplace: true is_factory_method: false abstract: true device_guard: true with_gil: false deprecated: false has_math_kernel: false -- name: bucketize - operator_name: bucketize - overload_name: Scalar +- name: _foreach_sinh + operator_name: _foreach_sinh + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor + schema_string: aten::_foreach_sinh(Tensor[] self) -> Tensor[] arguments: - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Scalar & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: boundaries - type: const at::Tensor & - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: out_int32 - type: bool - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: right - type: bool - schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &, bool, bool) + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) schema_order_arguments: - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Scalar & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: boundaries - type: const at::Tensor & - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: out_int32 - type: bool - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: right - type: bool + type: at::TensorList method_of: - Type - namespace mode: native python_module: '' returns: - - dynamic_type: at::Tensor + - dynamic_type: at::TensorList name: result - type: at::Tensor + type: ::std::vector inplace: false is_factory_method: false abstract: true @@ -112151,26 
+118824,470 @@ with_gil: false deprecated: false has_math_kernel: false -- name: searchsorted - operator_name: searchsorted - overload_name: Tensor +- name: _foreach_sinh_ + operator_name: _foreach_sinh_ + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor + schema_string: aten::_foreach_sinh_(Tensor(a!)[] self) -> () arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: sorted_sequence - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor + - annotation: a! + dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Tensor & - - annotation: null - default: false - dynamic_type: bool + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sqrt + operator_name: _foreach_sqrt + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sqrt(Tensor[] self) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sqrt_ + operator_name: _foreach_sqrt_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sqrt_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_tan + operator_name: _foreach_tan + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_tan(Tensor[] self) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_tan_ + operator_name: _foreach_tan_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_tan_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_tanh + operator_name: _foreach_tanh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_tanh(Tensor[] self) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_tanh_ + operator_name: _foreach_tanh_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_tanh_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_trunc + operator_name: _foreach_trunc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_trunc(Tensor[] self) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_trunc_ + operator_name: _foreach_trunc_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_trunc_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_zero_ + operator_name: _foreach_zero_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_zero_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_copy_ + operator_name: _foreach_copy_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_copy_(Tensor(a!)[] self, Tensor[] src, bool non_blocking=False) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: src + type: at::TensorList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: void (at::TensorList, at::TensorList, bool) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: src + type: at::TensorList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_copy + operator_name: _foreach_copy + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_copy(Tensor[] self, Tensor[] src, bool non_blocking=False) -> Tensor[] self_out + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: src + type: at::TensorList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: src + type: at::TensorList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + field_name: self_out + name: self_out + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bucketize + operator_name: bucketize + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool is_nullable: false kwarg_only: true name: out_int32 @@ -112182,21 +119299,254 @@ kwarg_only: true name: right type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool) + schema_order_arguments: - annotation: null - default: c10::nullopt + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bucketize_out + operator_name: bucketize + overload_name: Tensor_out + 
manual_kernel_registration: false + category_override: '' + schema_string: aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bucketize + operator_name: bucketize + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: searchsorted + 
operator_name: searchsorted + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sorted_sequence + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + - annotation: null + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: side - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: sorter - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, c10::optional, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, ::std::optional, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -112223,19 +119573,19 @@ name: right type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: side - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: sorter - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -112291,20 +119641,20 @@ name: right type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: side - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: sorter - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, c10::optional, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, ::std::optional, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -112331,19 +119681,19 @@ name: right type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: side - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: sorter - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -112399,20 +119749,20 @@ name: right type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: side - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: sorter - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, bool, bool, c10::optional, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, bool, bool, ::std::optional, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -112439,19 +119789,19 @@ name: right type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: side - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: sorter - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -112468,6 +119818,121 @@ with_gil: false deprecated: false has_math_kernel: false +- name: searchsorted_out + operator_name: searchsorted + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sorted_sequence + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: side + type: ::std::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: sorter + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, bool, bool, ::std::optional, const ::std::optional &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sorted_sequence + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: side + type: ::std::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: sorter + type: const ::std::optional 
& + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _convert_indices_from_coo_to_csr operator_name: _convert_indices_from_coo_to_csr overload_name: '' @@ -113130,14 +120595,14 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const c10::optional &, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const ::std::optional &, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -113166,7 +120631,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -113230,14 +120695,14 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -113266,7 +120731,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -113333,14 +120798,14 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const c10::optional &, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const ::std::optional &, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -113372,7 +120837,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -113439,14 +120904,14 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, 
const at::Tensor &, const at::Scalar &, const at::Scalar &, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -113478,7 +120943,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -113968,7 +121433,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -113981,7 +121446,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -113998,7 +121463,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114056,7 +121521,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114069,7 +121534,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114086,7 +121551,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114137,7 +121602,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114150,7 +121615,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114167,7 +121632,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114231,7 +121696,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114242,7 +121707,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const 
at::Tensor &, const ::std::optional &, int64_t, int64_t, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114258,7 +121723,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114323,7 +121788,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114334,7 +121799,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114350,7 +121815,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114415,7 +121880,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114431,7 +121896,7 @@ is_nullable: false name: total_weight type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, const at::Tensor &, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, const at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114452,7 +121917,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114517,7 +121982,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114533,7 +121998,7 @@ is_nullable: false name: total_weight type: const at::Tensor & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, const at::Tensor &) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114554,7 +122019,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114615,7 +122080,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114628,7 +122093,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, at::Tensor &) 
schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114645,7 +122110,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114703,7 +122168,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114716,7 +122181,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114733,7 +122198,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114797,7 +122262,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114808,7 +122273,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114824,7 +122289,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114889,7 +122354,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114900,7 +122365,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114916,7 +122381,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114981,7 +122446,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114997,7 +122462,7 @@ is_nullable: false name: total_weight type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, const at::Tensor &, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, const at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -115018,7 +122483,7 @@ dynamic_type: at::Tensor 
is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -115083,7 +122548,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -115099,7 +122564,7 @@ is_nullable: false name: total_weight type: const at::Tensor & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, const at::Tensor &) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -115120,7 +122585,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -118227,12 +125692,12 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -118263,11 +125728,11 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -118327,12 +125792,12 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -118363,11 +125828,11 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -118515,12 +125980,12 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, ::std::optional) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -118551,11 +126016,11 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -120248,12 +127713,12 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -120293,11 +127758,11 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -120366,12 +127831,12 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -120411,11 +127876,11 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -120488,8 +127953,8 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -120533,7 +127998,7 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -120606,8 +128071,8 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -120651,7 +128116,7 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -120720,12 +128185,12 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -120765,11 +128230,11 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -120838,12 +128303,12 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -120883,11 +128348,11 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -120960,8 +128425,8 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -121005,7 +128470,7 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -121078,8 +128543,8 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -121123,7 +128588,7 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -122785,7 +130250,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -122856,7 +130321,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor + schema_string: aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -122913,7 +130378,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -123008,7 +130473,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor + schema_string: aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -124568,12 +132033,12 @@ name: mode type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: value - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -124591,11 +132056,11 @@ name: mode type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: value - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -124636,12 +132101,12 @@ name: mode type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: value - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::string_view, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::string_view, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -124660,11 +132125,11 @@ name: mode type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: value - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -124707,8 +132172,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -124729,7 +132194,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -124772,8 +132237,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -124794,7 +132259,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -124837,8 +132302,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor 
@@ -124859,7 +132324,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -124902,8 +132367,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -124924,7 +132389,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -124967,8 +132432,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -124989,7 +132454,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125032,8 +132497,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125054,7 +132519,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125092,8 +132557,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125109,7 +132574,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125147,8 +132612,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125164,7 +132629,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125202,8 +132667,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125219,7 
+132684,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125257,8 +132722,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125274,7 +132739,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125312,8 +132777,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125329,7 +132794,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125367,8 +132832,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125384,7 +132849,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125432,12 +132897,12 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125456,11 +132921,11 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -125508,12 +132973,12 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125532,11 +132997,11 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -125590,12 +133055,12 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125620,11 +133085,11 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -125678,12 +133143,12 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125708,11 +133173,11 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -125760,18 +133225,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125790,17 +133255,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true 
annotation: a! dynamic_type: at::Tensor @@ -125848,18 +133313,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125878,17 +133343,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -125942,18 +133407,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125978,17 +133443,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -126042,18 +133507,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126078,17 +133543,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -126136,18 +133601,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126166,17 +133631,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -126224,18 +133689,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126254,17 +133719,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -126318,18 +133783,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126354,17 +133819,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -126418,18 +133883,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126454,17 +133919,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -126512,18 +133977,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126542,17 +134007,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -126600,18 +134065,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126630,17 +134095,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -126694,18 +134159,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126730,17 +134195,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -126794,18 +134259,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126830,17 +134295,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -126888,18 +134353,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126918,17 +134383,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -126976,18 +134441,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127006,17 +134471,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -127070,18 +134535,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127106,17 +134571,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -127170,18 +134635,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127206,17 +134671,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -127264,24 +134729,24 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127300,23 +134765,23 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -127364,24 +134829,24 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127400,23 +134865,23 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -127470,24 +134935,24 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127512,23 +134977,23 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -127582,24 +135047,24 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127624,23 +135089,23 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -127683,12 +135148,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127702,11 +135167,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -127756,12 +135221,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127775,11 +135240,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -127822,12 +135287,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127841,11 +135306,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -127881,12 +135346,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127900,11 +135365,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -127953,12 +135418,12 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127978,11 +135443,11 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -128038,12 +135503,12 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128063,11 +135528,11 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -128116,12 +135581,12 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128141,11 +135606,11 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -128187,12 +135652,12 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128212,11 +135677,11 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -128259,18 +135724,18 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128284,17 +135749,17 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -128344,18 +135809,18 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128369,17 +135834,17 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -128422,18 +135887,18 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128447,17 +135912,17 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -128493,18 +135958,18 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128518,17 +135983,17 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -128577,18 +136042,18 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt 
dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128608,17 +136073,17 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -128674,18 +136139,18 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128705,17 +136170,17 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -128764,18 +136229,18 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128795,17 +136260,17 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -128847,18 +136312,18 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128878,17 +136343,17 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -128931,24 +136396,24 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128962,23 +136427,23 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double 
is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -129028,24 +136493,24 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129059,23 +136524,23 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -129118,24 +136583,24 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129149,23 +136614,23 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -129201,24 +136666,24 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129232,23 +136697,23 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -129297,24 +136762,24 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - 
schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129334,23 +136799,23 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -129406,24 +136871,24 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129443,23 +136908,23 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -129508,24 +136973,24 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129545,23 +137010,23 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -129603,24 +137068,24 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129640,23 +137105,23 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -129802,12 +137267,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: 
null dynamic_type: at::Tensor @@ -129820,11 +137285,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -129866,12 +137331,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129884,11 +137349,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -130014,7 +137479,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -130044,7 +137509,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -130073,7 +137538,7 @@ name: dilation size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -130096,7 +137561,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -130153,7 +137618,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor + schema_string: aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -130176,7 +137641,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -130205,7 +137670,7 @@ name: dilation size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -130228,7 +137693,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -130278,7 +137743,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -130308,7 +137773,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -130337,7 +137802,7 @@ name: dilation size: 3 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -130360,7 +137825,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -130417,7 +137882,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor + schema_string: aten::slow_conv_transpose3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -130440,7 +137905,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -130469,7 +137934,7 @@ name: dilation size: 3 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -130492,7 +137957,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -130542,7 +138007,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -130572,7 +138037,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -130587,7 +138052,7 @@ name: padding size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -130610,7 +138075,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -130653,7 +138118,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor + schema_string: aten::thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -130676,7 +138141,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -130691,7 +138156,7 @@ name: padding size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -130714,7 +138179,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -130750,7 +138215,7 @@ overload_name: output manual_kernel_registration: false category_override: '' - schema_string: aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!) + schema_string: aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) output) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -130779,7 +138244,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -130792,7 +138257,7 @@ name: padding size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -130814,7 +138279,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -130855,7 +138320,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor + schema_string: aten::_slow_conv2d_forward(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? 
bias, SymInt[2] stride, SymInt[2] padding) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -130877,7 +138342,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -130890,7 +138355,7 @@ name: padding size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -130912,7 +138377,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -130946,7 +138411,7 @@ overload_name: grad_input manual_kernel_registration: false category_override: '' - schema_string: aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + schema_string: aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) arguments: - allocate: true annotation: a! @@ -131085,7 +138550,7 @@ overload_name: output_mask manual_kernel_registration: false category_override: '' - schema_string: aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + schema_string: aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) arguments: - annotation: null dynamic_type: at::Tensor @@ -131195,7 +138660,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_conv_depthwise2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -131224,7 +138689,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -131243,7 +138708,7 @@ name: dilation size: 2 type: at::IntArrayRef - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, const at::Tensor &) + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -131265,7 +138730,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -131312,7 +138777,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation) -> Tensor + schema_string: aten::_conv_depthwise2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -131334,7 +138799,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -131353,7 +138818,7 @@ name: dilation size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -131375,7 +138840,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -131415,7 +138880,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor + schema_string: aten::conv_depthwise3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -131437,7 +138902,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -131456,7 +138921,7 @@ name: dilation size: 3 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -131478,7 +138943,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -131518,7 +138983,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -131548,7 +139013,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -131563,7 +139028,7 @@ name: padding size: 3 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -131586,7 +139051,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -131629,7 +139094,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor + schema_string: aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -131652,7 +139117,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -131667,7 +139132,7 @@ name: padding size: 3 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -131690,7 +139155,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -131726,7 +139191,7 @@ overload_name: output manual_kernel_registration: false category_override: '' - schema_string: aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) + schema_string: aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -131755,7 +139220,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -131768,7 +139233,7 @@ name: padding size: 3 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -131790,7 +139255,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -131831,7 +139296,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor + schema_string: aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
bias, SymInt[3] stride, SymInt[3] padding) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -131853,7 +139318,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -131866,7 +139331,7 @@ name: padding size: 3 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -131888,7 +139353,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -131922,7 +139387,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor + schema_string: aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -131945,7 +139410,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -131967,7 +139432,7 @@ name: dilation size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -131990,7 +139455,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -132033,7 +139498,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor + schema_string: aten::slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -132056,7 +139521,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -132078,7 +139543,7 @@ name: dilation size: 3 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -132101,7 +139566,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -135388,12 +142853,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -135401,11 +142866,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -135442,12 +142907,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -135455,11 +142920,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -136101,13 +143566,13 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136120,12 +143585,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -136472,12 +143937,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136490,11 +143955,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -136516,7 +143981,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + schema_string: aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -136524,11 +143989,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136536,12 +144001,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136549,11 +144014,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136561,11 +144026,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -136587,7 +144052,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
arguments: - allocate: true annotation: a! @@ -136602,11 +144067,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136614,12 +144079,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136627,11 +144092,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136639,11 +144104,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -136672,7 +144137,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + schema_string: aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -136680,11 +144145,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136692,12 +144157,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136705,11 +144170,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136717,11 +144182,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -136743,7 +144208,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -136758,11 +144223,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136770,12 +144235,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136783,11 +144248,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136795,11 +144260,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -136828,7 +144293,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + schema_string: aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -136836,11 +144301,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136848,12 +144313,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136861,11 +144326,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136873,11 +144338,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -136899,7 +144364,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -136914,11 +144379,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136926,12 +144391,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136939,11 +144404,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136951,11 +144416,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -136984,7 +144449,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + schema_string: aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -136992,11 +144457,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137004,12 +144469,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137017,11 +144482,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137029,11 +144494,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -137055,7 +144520,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -137070,11 +144535,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137082,12 +144547,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137095,11 +144560,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137107,11 +144572,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -137140,7 +144605,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + schema_string: aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -137148,11 +144613,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137160,12 +144625,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137173,11 +144638,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137185,11 +144650,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -137211,7 +144676,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -137226,11 +144691,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137238,12 +144703,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137251,11 +144716,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137263,11 +144728,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -137296,7 +144761,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + schema_string: aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -137304,11 +144769,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137316,12 +144781,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137329,11 +144794,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137341,11 +144806,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -137367,7 +144832,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -137382,11 +144847,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137394,12 +144859,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137407,11 +144872,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137419,11 +144884,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -137452,7 +144917,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + schema_string: aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -137460,7 +144925,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137474,12 +144939,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137487,7 +144952,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137501,11 +144966,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -137527,7 +144992,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -137542,7 +145007,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137556,12 +145021,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137569,7 +145034,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137583,11 +145048,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -137616,7 +145081,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + schema_string: aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -137624,7 +145089,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137638,12 +145103,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137651,7 +145116,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137665,11 +145130,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -137691,7 +145156,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -137706,7 +145171,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137720,12 +145185,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137733,7 +145198,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137747,11 +145212,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -137780,7 +145245,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + schema_string: aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -137788,7 +145253,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137802,12 +145267,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137815,7 +145280,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137829,11 +145294,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -137855,7 +145320,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -137870,7 +145335,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137884,12 +145349,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137897,7 +145362,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137911,11 +145376,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -137944,7 +145409,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + schema_string: aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -137952,7 +145417,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137966,12 +145431,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137979,7 +145444,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137993,11 +145458,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -138019,7 +145484,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -138034,7 +145499,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138048,12 +145513,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138061,7 +145526,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138075,11 +145540,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -138108,7 +145573,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + schema_string: aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -138116,7 +145581,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138130,12 +145595,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138143,7 +145608,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138157,11 +145622,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -138183,7 +145648,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -138198,7 +145663,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138212,12 +145677,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional, const at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138225,7 +145690,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138239,11 +145704,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -138272,7 +145737,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + schema_string: aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -138280,7 +145745,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138294,12 +145759,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138307,7 +145772,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138321,11 +145786,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -138347,7 +145812,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -138362,7 +145827,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138376,12 +145841,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional, const at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138389,7 +145854,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138403,11 +145868,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -138436,7 +145901,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + schema_string: aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -138444,26 +145909,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138471,25 +145936,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -138511,7 +145976,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -138526,26 +145991,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138553,25 +146018,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -138600,7 +146065,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + schema_string: aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -138608,26 +146073,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138635,25 +146100,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -138675,7 +146140,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -138690,26 +146155,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138717,25 +146182,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -138764,7 +146229,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + schema_string: aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -138772,26 +146237,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138799,25 +146264,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -138839,7 +146304,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -138854,26 +146319,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138881,25 +146346,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -138928,7 +146393,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + schema_string: aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -138936,26 +146401,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138963,25 +146428,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -139003,7 +146468,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -139018,26 +146483,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -139045,25 +146510,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -139092,7 +146557,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + schema_string: aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -139100,26 +146565,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -139127,25 +146592,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -139167,7 +146632,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -139182,26 +146647,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional, const at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -139209,25 +146674,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -139256,7 +146721,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + schema_string: aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -139264,26 +146729,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -139291,25 +146756,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -139331,7 +146796,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -139346,26 +146811,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional, const at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -139373,25 +146838,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -139440,7 +146905,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -139454,33 +146919,33 @@ name: d type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -139583,7 +147048,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -139597,33 +147062,33 @@ name: d type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -139714,7 +147179,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -139728,7 +147193,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -139763,7 +147228,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -139777,7 +147242,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -140260,7 +147725,7 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false @@ -140346,7 +147811,7 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false 
deprecated: false @@ -141645,19 +149110,19 @@ name: b type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: rcond - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -141670,18 +149135,18 @@ name: b type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: rcond - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -141761,19 +149226,19 @@ name: b type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: rcond - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -141786,18 +149251,18 @@ name: b type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: rcond - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -142663,6 +150128,41 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _linalg_eigvals + operator_name: _linalg_eigvals + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_linalg_eigvals(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: linalg_eigvals operator_name: linalg_eigvals overload_name: '' @@ -142742,11 +150242,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: _linalg_eigh operator_name: _linalg_eigh overload_name: '' @@ -143892,13 +151392,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: ord - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -143911,13 +151411,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, at::OptionalIntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, at::OptionalIntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -143925,13 +151425,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: ord - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -143944,12 +151444,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -143984,7 +151484,7 @@ name: ord type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -143997,13 +151497,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, at::OptionalIntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, 
at::OptionalIntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144016,7 +151516,7 @@ name: ord type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144029,12 +151529,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -144071,13 +151571,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: ord - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144090,13 +151590,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::OptionalIntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144104,13 +151604,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: ord - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144123,12 +151623,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -144177,7 +151677,7 @@ name: ord type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144190,13 +151690,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, at::OptionalIntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144209,7 +151709,7 @@ name: ord type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144222,12 +151722,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -144270,7 +151770,7 @@ name: ord type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144283,13 +151783,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, at::OptionalIntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, at::OptionalIntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144303,7 +151803,7 @@ name: ord type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144316,12 +151816,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -144364,7 +151864,7 @@ name: ord type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144377,13 +151877,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::OptionalIntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144397,7 +151897,7 @@ name: ord type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144410,12 +151910,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -144469,13 +151969,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, at::IntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, at::IntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144500,12 +152000,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -144559,13 +152059,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::IntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144590,12 +152090,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -144650,13 +152150,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, at::IntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, at::IntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144682,12 +152182,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -144742,13 +152242,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, at::IntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, at::IntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144774,12 +152274,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -144828,13 +152328,13 @@ name: compute_uv type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144854,12 +152354,12 @@ name: compute_uv type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -144934,13 +152434,13 @@ name: compute_uv type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, c10::optional, at::Tensor &, at::Tensor &, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, ::std::optional, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144960,12 +152460,12 @@ name: compute_uv type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -145034,13 +152534,13 @@ name: full_matrices type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145054,12 +152554,12 @@ name: full_matrices type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -145128,13 +152628,13 @@ name: full_matrices type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, c10::optional, at::Tensor &, at::Tensor &, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, ::std::optional, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145148,12 +152648,12 @@ name: full_matrices type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -145216,13 +152716,13 @@ name: A type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145230,12 +152730,12 @@ name: A type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -145272,13 +152772,13 @@ name: A type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145286,12 +152786,12 @@ name: A type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -145328,12 +152828,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145341,11 +152841,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -145382,12 +152882,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145395,11 +152895,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -145545,14 +153045,14 @@ is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -145560,7 +153060,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145573,14 +153073,14 @@ is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -145629,14 +153129,14 @@ is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -145644,7 +153144,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145657,14 +153157,14 @@ is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -145708,19 +153208,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -145728,7 +153228,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145736,19 +153236,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -145792,19 +153292,19 @@ name: 
self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -145812,7 +153312,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, ::std::optional, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145820,19 +153320,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -146602,6 +154102,65 @@ with_gil: false deprecated: false has_math_kernel: true +- name: _spsolve + operator_name: _spsolve + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_spsolve(Tensor A, Tensor B, *, bool left=True) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: B + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: left + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: B + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: left + type: bool + method_of: + - Type + - namespace + mode: native + python_module: sparse + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: linalg_solve_out operator_name: linalg_solve overload_name: out @@ -146801,7 +154360,7 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dims @@ -146819,7 +154378,7 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dims @@ -146865,7 +154424,7 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dims @@ -146883,7 +154442,7 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dims @@ -147169,14 +154728,14 @@ 
is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -147184,7 +154743,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -147197,14 +154756,14 @@ is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -147253,14 +154812,14 @@ is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -147268,7 +154827,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -147281,14 +154840,14 @@ is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -147332,19 +154891,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -147352,7 +154911,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -147360,19 +154919,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -147416,19 +154975,19 @@ name: self type: const at::Tensor & - annotation: null - 
default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -147436,7 +154995,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, ::std::optional, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -147444,19 +155003,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -147845,7 +155404,7 @@ name: padding type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: output_size @@ -147863,7 +155422,7 @@ name: padding type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: output_size @@ -147941,6 +155500,63 @@ with_gil: false deprecated: false has_math_kernel: true +- name: _test_parallel_materialize + operator_name: _test_parallel_materialize + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_test_parallel_materialize(Tensor self, int num_parallel, bool skip_first=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_parallel + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: skip_first + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_parallel + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: skip_first + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _test_optional_intlist operator_name: _test_optional_intlist overload_name: '' @@ -148049,8 +155665,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: addends - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -148061,7 +155677,7 @@ dynamic_type: at::ArrayRef is_nullable: true 
name: addends - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -148463,21 +156079,21 @@ is_nullable: true kwarg_only: true name: lengths - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: indices - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: offsets - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ -148493,13 +156109,13 @@ name: unsafe type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: initial - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, bool, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, bool, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -148517,21 +156133,21 @@ is_nullable: true kwarg_only: true name: lengths - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: indices - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: offsets - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ -148547,12 +156163,12 @@ name: unsafe type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: initial - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -148602,14 +156218,14 @@ is_nullable: true kwarg_only: true name: lengths - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: offsets - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ -148618,13 +156234,13 @@ name: axis type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: initial - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::string_view, const c10::optional &, const c10::optional &, int64_t, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::string_view, const ::std::optional &, const ::std::optional &, int64_t, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -148652,14 +156268,14 @@ is_nullable: true kwarg_only: true name: lengths - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: offsets - type: const c10::optional & + type: const 
::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ -148668,12 +156284,12 @@ name: axis type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: initial - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -148695,7 +156311,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor + schema_string: aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0, str padding_side="right") -> Tensor arguments: - annotation: null dynamic_type: at::TensorList @@ -148714,7 +156330,13 @@ is_nullable: false name: padding_value type: double - schema_order_cpp_signature: at::Tensor (at::TensorList, bool, double) + - annotation: null + default: '"right"' + dynamic_type: c10::string_view + is_nullable: false + name: padding_side + type: c10::string_view + schema_order_cpp_signature: at::Tensor (at::TensorList, bool, double, c10::string_view) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -148733,6 +156355,12 @@ is_nullable: false name: padding_value type: double + - annotation: null + default: '"right"' + dynamic_type: c10::string_view + is_nullable: false + name: padding_side + type: c10::string_view method_of: - Type - namespace @@ -148842,30 +156470,30 @@ name: list type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: pin_memory - type: c10::optional - schema_order_cpp_signature: at::Tensor (at::TensorList, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (at::TensorList, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -148873,29 +156501,29 @@ name: list type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -149175,12 +156803,12 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional - 
schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -149198,11 +156826,11 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -149603,24 +157231,24 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: start - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: end - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t is_nullable: false name: step type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -149634,17 +157262,17 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: start - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: end - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -150770,7 +158398,7 @@ name: padding type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: output_size @@ -150788,7 +158416,7 @@ name: padding type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: output_size @@ -150809,6 +158437,130 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _jagged_to_padded_dense_forward + operator_name: _jagged_to_padded_dense_forward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_jagged_to_padded_dense_forward(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value=0.0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: offsets + type: at::TensorList + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: max_lengths + type: at::IntArrayRef + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: padding_value + type: double + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::TensorList, at::IntArrayRef, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: offsets + type: at::TensorList + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: max_lengths + type: at::IntArrayRef + - annotation: null + default: 0.0 + 
dynamic_type: double + is_nullable: false + name: padding_value + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _padded_dense_to_jagged_forward + operator_name: _padded_dense_to_jagged_forward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_padded_dense_to_jagged_forward(Tensor dense, Tensor[] offsets, SymInt? total_L=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: offsets + type: at::TensorList + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: total_L + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::TensorList, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: offsets + type: at::TensorList + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: total_L + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _nested_tensor_softmax_with_shape operator_name: _nested_tensor_softmax_with_shape overload_name: '' @@ -150854,6 +158606,63 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _safe_softmax + operator_name: _safe_softmax + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_safe_softmax(Tensor self, int dim, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _transformer_encoder_layer_fwd operator_name: _transformer_encoder_layer_fwd overload_name: '' @@ -150956,14 +158765,14 @@ dynamic_type: at::Tensor is_nullable: true name: mask - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, double, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, double, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151060,13 +158869,13 @@ dynamic_type: at::Tensor is_nullable: true name: mask - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -151140,7 +158949,7 @@ dynamic_type: at::Tensor is_nullable: true name: mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: true dynamic_type: bool @@ -151154,12 +158963,12 @@ name: average_attn_weights type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, 
const at::Tensor &, const at::Tensor &, const ::std::optional &, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151211,7 +159020,7 @@ dynamic_type: at::Tensor is_nullable: true name: mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: true dynamic_type: bool @@ -151225,11 +159034,11 @@ name: average_attn_weights type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -151254,7 +159063,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> Tensor + schema_string: aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -151276,7 +159085,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151289,7 +159098,21 @@ is_nullable: false name: is_causal type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, double, bool) + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: enable_gqa + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, double, bool, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151311,7 +159134,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151324,6 +159147,20 @@ is_nullable: false name: is_causal type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: enable_gqa + type: bool method_of: - Type - namespace @@ -151340,12 +159177,12 @@ with_gil: false deprecated: false has_math_kernel: true -- name: _scaled_dot_product_attention - operator_name: _scaled_dot_product_attention +- name: _fused_sdp_choice + operator_name: _fused_sdp_choice overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor) + schema_string: aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? 
scale=None, bool enable_gqa=False) -> int arguments: - annotation: null dynamic_type: at::Tensor @@ -151367,7 +159204,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151378,15 +159215,23 @@ default: false dynamic_type: bool is_nullable: false - name: need_attn_weights + name: is_causal type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false - name: is_causal + kwarg_only: true + name: enable_gqa type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, double, bool, bool) + schema_order_cpp_signature: int64_t (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, double, bool, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151408,7 +159253,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151419,39 +159264,44 @@ default: false dynamic_type: bool is_nullable: false - name: need_attn_weights + name: is_causal type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false - name: is_causal + kwarg_only: true + name: enable_gqa type: bool method_of: - Type - namespace mode: native - python_module: nn + python_module: '' returns: - - dynamic_type: at::Tensor - name: result0 - type: at::Tensor - - dynamic_type: at::Tensor - name: result1 - type: at::Tensor + - dynamic_type: int64_t + name: result + type: int64_t inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true -- name: _fused_sdp_choice - operator_name: _fused_sdp_choice + has_math_kernel: false +- name: _scaled_dot_product_attention_math + operator_name: _scaled_dot_product_attention_math overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> int + schema_string: aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? 
scale=None, bool enable_gqa=False) -> (Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -151473,7 +159323,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151486,7 +159336,27 @@ is_nullable: false name: is_causal type: bool - schema_order_cpp_signature: int64_t (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, double, bool) + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: dropout_mask + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: enable_gqa + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, double, bool, const ::std::optional &, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151508,7 +159378,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151521,28 +159391,51 @@ is_nullable: false name: is_causal type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: dropout_mask + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: enable_gqa + type: bool method_of: - Type - namespace mode: native python_module: '' returns: - - dynamic_type: int64_t - name: result - type: int64_t + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor inplace: false is_factory_method: false - abstract: true + abstract: false device_guard: true with_gil: false deprecated: false - has_math_kernel: false -- name: _scaled_dot_product_attention_math - operator_name: _scaled_dot_product_attention_math + has_math_kernel: true +- name: _scaled_dot_product_attention_math_for_mps + operator_name: _scaled_dot_product_attention_math_for_mps overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None) -> (Tensor, Tensor) + schema_string: aten::_scaled_dot_product_attention_math_for_mps(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? 
scale=None) -> (Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -151564,7 +159457,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151582,8 +159475,15 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_mask - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, double, bool, const c10::optional &) + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, double, bool, const ::std::optional &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151605,7 +159505,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151623,7 +159523,14 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_mask - type: const c10::optional & + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -151638,17 +159545,17 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: _scaled_dot_product_flash_attention operator_name: _scaled_dot_product_flash_attention overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False) -> (Tensor ouput, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, int philox_seed, int philox_offset, Tensor debug_attn_mask) + schema_string: aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? 
scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask) arguments: - annotation: null dynamic_type: at::Tensor @@ -151683,7 +159590,14 @@ is_nullable: false name: return_debug_mask type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, bool, bool) + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151718,6 +159632,13 @@ is_nullable: false name: return_debug_mask type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -151725,8 +159646,8 @@ python_module: '' returns: - dynamic_type: at::Tensor - field_name: ouput - name: ouput + field_name: output + name: output type: at::Tensor - dynamic_type: at::Tensor field_name: logsumexp @@ -151743,19 +159664,19 @@ - dynamic_type: int64_t field_name: max_q name: max_q - type: int64_t + type: c10::SymInt - dynamic_type: int64_t field_name: max_k name: max_k - type: int64_t - - dynamic_type: int64_t + type: c10::SymInt + - dynamic_type: at::Tensor field_name: philox_seed name: philox_seed - type: int64_t - - dynamic_type: int64_t + type: at::Tensor + - dynamic_type: at::Tensor field_name: philox_offset name: philox_offset - type: int64_t + type: at::Tensor - dynamic_type: at::Tensor field_name: debug_attn_mask name: debug_attn_mask @@ -151767,18 +159688,13 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _scaled_dot_product_flash_attention_backward - operator_name: _scaled_dot_product_flash_attention_backward +- name: _scaled_dot_product_flash_attention_for_cpu + operator_name: _scaled_dot_product_flash_attention_for_cpu overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value) + schema_string: aten::_scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, *, Tensor? attn_mask=None, float? 
scale=None) -> (Tensor output, Tensor logsumexp) arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: grad_out - type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -151795,62 +159711,33 @@ name: value type: const at::Tensor & - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: out - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: logsumexp - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: cum_seq_q - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: cum_seq_k - type: const at::Tensor & - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_q - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_k - type: int64_t - - annotation: null + default: 0.0 dynamic_type: double is_nullable: false name: dropout_p type: double - annotation: null + default: false dynamic_type: bool is_nullable: false name: is_causal type: bool - annotation: null - dynamic_type: int64_t - is_nullable: false - name: philox_seed - type: int64_t + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: attn_mask + type: const ::std::optional & - annotation: null - dynamic_type: int64_t - is_nullable: false - name: philox_offset - type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double, bool, int64_t, int64_t) + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, bool, const ::std::optional &, ::std::optional) schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: grad_out - type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -151867,55 +159754,31 @@ name: value type: const at::Tensor & - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: out - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: logsumexp - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: cum_seq_q - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: cum_seq_k - type: const at::Tensor & - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_q - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_k - type: int64_t - - annotation: null + default: 0.0 dynamic_type: double is_nullable: false name: dropout_p type: double - annotation: null + default: false dynamic_type: bool is_nullable: false name: is_causal type: bool - annotation: null - dynamic_type: int64_t - is_nullable: false - name: philox_seed - type: int64_t + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: attn_mask + type: const ::std::optional & - annotation: null - dynamic_type: int64_t - is_nullable: false - name: philox_offset - type: int64_t + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true 
+ name: scale + type: ::std::optional method_of: - Type - namespace @@ -151923,16 +159786,12 @@ python_module: '' returns: - dynamic_type: at::Tensor - field_name: grad_query - name: grad_query - type: at::Tensor - - dynamic_type: at::Tensor - field_name: grad_key - name: grad_key + field_name: output + name: output type: at::Tensor - dynamic_type: at::Tensor - field_name: grad_value - name: grad_value + field_name: logsumexp + name: logsumexp type: at::Tensor inplace: false is_factory_method: false @@ -151941,12 +159800,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _scaled_dot_product_efficient_attention - operator_name: _scaled_dot_product_efficient_attention +- name: _scaled_dot_product_fused_attention_overrideable + operator_name: _scaled_dot_product_fused_attention_overrideable overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, bool compute_log_sumexp, bool is_causal=False) -> (Tensor, Tensor) + schema_string: aten::_scaled_dot_product_fused_attention_overrideable(Tensor query, Tensor key, Tensor value, Tensor? attn_bias=None, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask) arguments: - annotation: null dynamic_type: at::Tensor @@ -151964,17 +159823,37 @@ name: value type: const at::Tensor & - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: attn_bias + type: const ::std::optional & + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + default: false dynamic_type: bool is_nullable: false - name: compute_log_sumexp + name: is_causal type: bool - annotation: null default: false dynamic_type: bool is_nullable: false - name: is_causal + name: return_debug_mask type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool) + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, double, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151992,16 +159871,36 @@ name: value type: const at::Tensor & - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: attn_bias + type: const ::std::optional & + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + default: false dynamic_type: bool is_nullable: false - name: compute_log_sumexp + name: is_causal type: bool - annotation: null default: false dynamic_type: bool is_nullable: false - name: is_causal + name: return_debug_mask type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -152009,10 +159908,40 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: result0 + field_name: output + name: output type: at::Tensor - dynamic_type: at::Tensor - name: result1 + field_name: logsumexp + name: 
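_scaled_dot_product_fused_attention_overrideable is a new entry whose nine-field return (output, logsumexp, varlen bookkeeping, philox state, debug mask) mirrors the flash-attention forward. Upstream it appears to exist as an override point for out-of-tree backends rather than as a kernel for stock CPU/CUDA builds, so the sketch below (wrapper name ours) only illustrates the call shape derived from the schema_string above:

    #include <ATen/ATen.h>

    // Returns a nine-element tuple; use auto / structured bindings at the
    // call site. Expect a "not implemented" error unless a backend
    // registers a kernel for this op.
    auto fused_overrideable_fwd(const at::Tensor& q, const at::Tensor& k,
                                const at::Tensor& v) {
      return at::_scaled_dot_product_fused_attention_overrideable(
          q, k, v,
          /*attn_bias=*/std::nullopt,
          /*dropout_p=*/0.0,
          /*is_causal=*/false,
          /*return_debug_mask=*/false,
          /*scale=*/std::nullopt);
    }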
logsumexp + type: at::Tensor + - dynamic_type: at::Tensor + field_name: cum_seq_q + name: cum_seq_q + type: at::Tensor + - dynamic_type: at::Tensor + field_name: cum_seq_k + name: cum_seq_k + type: at::Tensor + - dynamic_type: int64_t + field_name: max_q + name: max_q + type: c10::SymInt + - dynamic_type: int64_t + field_name: max_k + name: max_k + type: c10::SymInt + - dynamic_type: at::Tensor + field_name: philox_seed + name: philox_seed + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_offset + name: philox_offset + type: at::Tensor + - dynamic_type: at::Tensor + field_name: debug_attn_mask + name: debug_attn_mask type: at::Tensor inplace: false is_factory_method: false @@ -152021,17 +159950,17 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _scaled_dot_product_efficient_attention_backward - operator_name: _scaled_dot_product_efficient_attention_backward +- name: _scaled_dot_product_flash_attention_backward + operator_name: _scaled_dot_product_flash_attention_backward overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor) + schema_string: aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value) arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: grad_out_ + name: grad_out type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -152059,23 +159988,58 @@ name: logsumexp type: const at::Tensor & - annotation: null - default: false + dynamic_type: at::Tensor + is_nullable: false + name: cum_seq_q + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: cum_seq_k + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_q + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_k + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null dynamic_type: bool is_nullable: false name: is_causal type: bool - annotation: null - default: false - dynamic_type: bool + dynamic_type: at::Tensor is_nullable: false - name: chunk_grad_outputs - type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool) + name: philox_seed + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: philox_offset + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double, bool, const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null 
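In the _scaled_dot_product_flash_attention_backward record above, the philox RNG state changes from a pair of int64_t values to a pair of Tensors, and an optional kwarg-only scale is added; the rest of the inputs (saved forward outputs plus varlen metadata) carry over. A sketch of the resulting call, with all inputs assumed to come from a prior flash-attention forward (wrapper name ours):

    #include <ATen/ATen.h>
    #include <tuple>

    std::tuple<at::Tensor, at::Tensor, at::Tensor> flash_sdpa_bwd(
        const at::Tensor& grad_out, const at::Tensor& q, const at::Tensor& k,
        const at::Tensor& v, const at::Tensor& out, const at::Tensor& logsumexp,
        const at::Tensor& cum_seq_q, const at::Tensor& cum_seq_k,
        int64_t max_q, int64_t max_k, double dropout_p, bool is_causal,
        const at::Tensor& philox_seed,      // now a Tensor, not int64_t
        const at::Tensor& philox_offset) {  // now a Tensor, not int64_t
      return at::_scaled_dot_product_flash_attention_backward(
          grad_out, q, k, v, out, logsumexp, cum_seq_q, cum_seq_k,
          max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset,
          /*scale=*/std::nullopt);
    }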
dynamic_type: at::Tensor is_nullable: false - name: grad_out_ + name: grad_out type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -152102,100 +160066,71 @@ is_nullable: false name: logsumexp type: const at::Tensor & - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: is_causal - type: bool - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: chunk_grad_outputs - type: bool - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: result0 - type: at::Tensor - - dynamic_type: at::Tensor - name: result1 - type: at::Tensor - - dynamic_type: at::Tensor - name: result2 - type: at::Tensor - inplace: false - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: _chunk_grad_outputs_efficient_attention - operator_name: _chunk_grad_outputs_efficient_attention - overload_name: '' - manual_kernel_registration: false - category_override: '' - schema_string: aten::_chunk_grad_outputs_efficient_attention(Tensor query, Tensor key, Tensor value, bool is_causal=False) -> bool - arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: query + name: cum_seq_q type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: key + name: cum_seq_k type: const at::Tensor & - annotation: null - dynamic_type: at::Tensor + dynamic_type: int64_t is_nullable: false - name: value - type: const at::Tensor & + name: max_q + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_k + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double - annotation: null - default: false dynamic_type: bool is_nullable: false name: is_causal type: bool - schema_order_cpp_signature: bool (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: query - type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: key + name: philox_seed type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: value + name: philox_offset type: const at::Tensor & - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: is_causal - type: bool + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace mode: native python_module: '' returns: - - dynamic_type: bool - name: result - type: bool + - dynamic_type: at::Tensor + field_name: grad_query + name: grad_query + type: at::Tensor + - dynamic_type: at::Tensor + field_name: grad_key + name: grad_key + type: at::Tensor + - dynamic_type: at::Tensor + field_name: grad_value + name: grad_value + type: at::Tensor inplace: false is_factory_method: false abstract: true @@ -152203,13 +160138,18 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _flash_attention_forward - operator_name: _flash_attention_forward +- name: _scaled_dot_product_flash_attention_for_cpu_backward + operator_name: _scaled_dot_product_flash_attention_for_cpu_backward overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, 
Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, bool return_debug_mask) -> (Tensor output, Tensor softmax_logsumexp, int philox_seed, int philox_offset, Tensor debug_attn_mask) + schema_string: aten::_scaled_dot_product_flash_attention_for_cpu_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, float dropout_p, bool is_causal, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value) arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152228,23 +160168,13 @@ - annotation: null dynamic_type: at::Tensor is_nullable: false - name: cum_seq_q + name: out type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: cum_seq_k + name: logsumexp type: const at::Tensor & - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_q - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_k - type: int64_t - annotation: null dynamic_type: double is_nullable: false @@ -152256,12 +160186,26 @@ name: is_causal type: bool - annotation: null - dynamic_type: bool - is_nullable: false - name: return_debug_mask - type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double, bool, bool) + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: attn_mask + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, double, bool, const ::std::optional &, ::std::optional) schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152280,23 +160224,13 @@ - annotation: null dynamic_type: at::Tensor is_nullable: false - name: cum_seq_q + name: out type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: cum_seq_k + name: logsumexp type: const at::Tensor & - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_q - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_k - type: int64_t - annotation: null dynamic_type: double is_nullable: false @@ -152308,10 +160242,19 @@ name: is_causal type: bool - annotation: null - dynamic_type: bool - is_nullable: false - name: return_debug_mask - type: bool + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: attn_mask + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -152319,24 +160262,16 @@ python_module: '' returns: - dynamic_type: at::Tensor - field_name: output - name: output + field_name: grad_query + name: grad_query type: at::Tensor - dynamic_type: at::Tensor - field_name: softmax_logsumexp - name: softmax_logsumexp + field_name: grad_key + name: grad_key type: at::Tensor - 
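The _scaled_dot_product_flash_attention_for_cpu_backward schema above is deliberately simpler than the CUDA one: no cumulative sequence lengths, max_q/max_k, or philox state, just the saved out/logsumexp plus the same optional attn_mask and scale keywords as the CPU forward. A sketch (wrapper name ours):

    #include <ATen/ATen.h>
    #include <optional>
    #include <tuple>

    std::tuple<at::Tensor, at::Tensor, at::Tensor> flash_sdpa_cpu_bwd(
        const at::Tensor& grad_out, const at::Tensor& q, const at::Tensor& k,
        const at::Tensor& v, const at::Tensor& out,
        const at::Tensor& logsumexp, double dropout_p, bool is_causal,
        const std::optional<at::Tensor>& attn_mask,  // kwarg-only in schema
        std::optional<double> scale) {               // kwarg-only in schema
      return at::_scaled_dot_product_flash_attention_for_cpu_backward(
          grad_out, q, k, v, out, logsumexp, dropout_p, is_causal,
          attn_mask, scale);
    }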
- dynamic_type: int64_t - field_name: philox_seed - name: philox_seed - type: int64_t - - dynamic_type: int64_t - field_name: philox_offset - name: philox_offset - type: int64_t - dynamic_type: at::Tensor - field_name: debug_attn_mask - name: debug_attn_mask + field_name: grad_value + name: grad_value type: at::Tensor inplace: false is_factory_method: false @@ -152345,12 +160280,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _flash_attention_backward - operator_name: _flash_attention_backward +- name: _scaled_dot_product_fused_attention_overrideable_backward + operator_name: _scaled_dot_product_fused_attention_overrideable_backward overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor, Tensor, Tensor) + schema_string: aten::_scaled_dot_product_fused_attention_overrideable_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor attn_bias, bool[4] grad_input_mask, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value, Tensor grad_attn_bias) arguments: - annotation: null dynamic_type: at::Tensor @@ -152372,6 +160307,16 @@ is_nullable: false name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: attn_bias + type: const at::Tensor & + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: grad_input_mask + type: ::std::array - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152413,16 +160358,23 @@ name: is_causal type: bool - annotation: null - dynamic_type: int64_t + dynamic_type: at::Tensor is_nullable: false name: philox_seed - type: int64_t + type: const at::Tensor & - annotation: null - dynamic_type: int64_t + dynamic_type: at::Tensor is_nullable: false name: philox_offset - type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double, bool, int64_t, int64_t) + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::array, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double, bool, const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -152444,6 +160396,16 @@ is_nullable: false name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: attn_bias + type: const at::Tensor & + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: grad_input_mask + type: ::std::array - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152485,15 +160447,22 @@ name: is_causal type: bool - annotation: null - dynamic_type: int64_t + 
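_scaled_dot_product_fused_attention_overrideable_backward introduces a bool[4] grad_input_mask selecting which of grad_query, grad_key, grad_value, and grad_attn_bias to materialize; in C++ this is std::array<bool, 4> (the template parameters are not rendered in the generated signature above). A sketch of the argument order, skipping the bias gradient (wrapper name ours):

    #include <ATen/ATen.h>
    #include <array>

    auto fused_overrideable_bwd(
        const at::Tensor& grad_out, const at::Tensor& q, const at::Tensor& k,
        const at::Tensor& v, const at::Tensor& attn_bias,
        const at::Tensor& out, const at::Tensor& logsumexp,
        const at::Tensor& cum_seq_q, const at::Tensor& cum_seq_k,
        int64_t max_q, int64_t max_k, double dropout_p, bool is_causal,
        const at::Tensor& philox_seed, const at::Tensor& philox_offset) {
      // Compute grad_query/grad_key/grad_value but skip grad_attn_bias.
      std::array<bool, 4> grad_input_mask = {true, true, true, false};
      return at::_scaled_dot_product_fused_attention_overrideable_backward(
          grad_out, q, k, v, attn_bias, grad_input_mask,
          out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k,
          dropout_p, is_causal, philox_seed, philox_offset,
          /*scale=*/std::nullopt);
    }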
dynamic_type: at::Tensor is_nullable: false name: philox_seed - type: int64_t + type: const at::Tensor & - annotation: null - dynamic_type: int64_t + dynamic_type: at::Tensor is_nullable: false name: philox_offset - type: int64_t + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -152501,13 +160470,20 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: result0 + field_name: grad_query + name: grad_query type: at::Tensor - dynamic_type: at::Tensor - name: result1 + field_name: grad_key + name: grad_key type: at::Tensor - dynamic_type: at::Tensor - name: result2 + field_name: grad_value + name: grad_value + type: at::Tensor + - dynamic_type: at::Tensor + field_name: grad_attn_bias + name: grad_attn_bias type: at::Tensor inplace: false is_factory_method: false @@ -152516,12 +160492,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _efficient_attention_forward - operator_name: _efficient_attention_forward +- name: _scaled_dot_product_efficient_attention + operator_name: _scaled_dot_product_efficient_attention overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, bool compute_log_sumexp=False, bool causal=False) -> (Tensor, Tensor) + schema_string: aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset) arguments: - annotation: null dynamic_type: at::Tensor @@ -152541,31 +160517,33 @@ - annotation: null dynamic_type: at::Tensor is_nullable: true - name: cu_seqlens_q - type: const c10::optional & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: cu_seqlens_k - type: const c10::optional & - - annotation: null - dynamic_type: int64_t - is_nullable: true - name: max_seqlen_q - type: c10::optional + name: attn_bias + type: const ::std::optional & - annotation: null - default: false dynamic_type: bool is_nullable: false name: compute_log_sumexp type: bool + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: dropout_p + type: double - annotation: null default: false dynamic_type: bool is_nullable: false - name: causal + name: is_causal type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, c10::optional, bool, bool) + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, bool, double, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -152585,30 +160563,32 @@ - annotation: null dynamic_type: at::Tensor is_nullable: true - name: cu_seqlens_q - type: const c10::optional & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: cu_seqlens_k - type: const c10::optional & - - annotation: null - dynamic_type: int64_t - is_nullable: true - name: max_seqlen_q - type: c10::optional + name: 
attn_bias + type: const ::std::optional & - annotation: null - default: false dynamic_type: bool is_nullable: false name: compute_log_sumexp type: bool + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: dropout_p + type: double - annotation: null default: false dynamic_type: bool is_nullable: false - name: causal + name: is_causal type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -152616,10 +160596,20 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: result0 + field_name: output + name: output type: at::Tensor - dynamic_type: at::Tensor - name: result1 + field_name: log_sumexp + name: log_sumexp + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_seed + name: philox_seed + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_offset + name: philox_offset type: at::Tensor inplace: false is_factory_method: false @@ -152628,12 +160618,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _efficient_attention_backward - operator_name: _efficient_attention_backward +- name: _scaled_dot_product_efficient_attention_backward + operator_name: _scaled_dot_product_efficient_attention_backward overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor) + schema_string: aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor attn_bias, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, float dropout_p, bool[4] grad_input_mask, bool is_causal=False, *, float? 
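The reworked _scaled_dot_product_efficient_attention gains an optional attn_bias input, a dropout_p argument, and the kwarg-only scale, and now returns the philox state alongside output and log_sumexp (four tensors instead of two). A sketch of the new call shape; memory-efficient attention is a CUDA kernel upstream, hence the kCUDA tensors:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor q = at::randn({1, 8, 128, 64}, at::kCUDA);
      at::Tensor k = at::randn({1, 8, 128, 64}, at::kCUDA);
      at::Tensor v = at::randn({1, 8, 128, 64}, at::kCUDA);
      auto [output, log_sumexp, philox_seed, philox_offset] =
          at::_scaled_dot_product_efficient_attention(
              q, k, v,
              /*attn_bias=*/std::nullopt,   // new optional input
              /*compute_log_sumexp=*/true,
              /*dropout_p=*/0.0,            // new in this diff
              /*is_causal=*/false,
              /*scale=*/std::nullopt);      // new kwarg-only argument
      return 0;
    }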
scale=None) -> (Tensor, Tensor, Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -152655,6 +160645,11 @@ is_nullable: false name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: attn_bias + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152666,18 +160661,39 @@ name: logsumexp type: const at::Tensor & - annotation: null - default: false - dynamic_type: bool + dynamic_type: at::Tensor is_nullable: false - name: is_causal - type: bool + name: philox_seed + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: philox_offset + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: grad_input_mask + type: ::std::array - annotation: null default: false dynamic_type: bool is_nullable: false - name: chunk_grad_outputs + name: is_causal type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool) + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, double, ::std::array, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -152699,6 +160715,11 @@ is_nullable: false name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: attn_bias + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152710,17 +160731,38 @@ name: logsumexp type: const at::Tensor & - annotation: null - default: false - dynamic_type: bool + dynamic_type: at::Tensor is_nullable: false - name: is_causal - type: bool + name: philox_seed + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: philox_offset + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: grad_input_mask + type: ::std::array - annotation: null default: false dynamic_type: bool is_nullable: false - name: chunk_grad_outputs + name: is_causal type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -152736,6 +160778,9 @@ - dynamic_type: at::Tensor name: result2 type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor inplace: false is_factory_method: false abstract: true @@ -152743,57 +160788,115 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _triton_scaled_dot_attention - operator_name: _triton_scaled_dot_attention +- name: _scaled_dot_product_cudnn_attention + operator_name: _scaled_dot_product_cudnn_attention overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor + schema_string: 
aten::_scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask) arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: q + name: query type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: k + name: key type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: v + name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: attn_bias + type: const ::std::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: compute_log_sumexp + type: bool - annotation: null default: 0.0 dynamic_type: double is_nullable: false name: dropout_p type: double - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double) + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: is_causal + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_debug_mask + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, bool, double, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: q + name: query type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: k + name: key type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: v + name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: attn_bias + type: const ::std::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: compute_log_sumexp + type: bool - annotation: null default: 0.0 dynamic_type: double is_nullable: false name: dropout_p type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: is_causal + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_debug_mask + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -152801,7 +160904,40 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: result + field_name: output + name: output + type: at::Tensor + - dynamic_type: at::Tensor + field_name: logsumexp + name: logsumexp + type: at::Tensor + - dynamic_type: at::Tensor + field_name: cum_seq_q + name: cum_seq_q + type: at::Tensor + - dynamic_type: at::Tensor + field_name: cum_seq_k + name: cum_seq_k + type: at::Tensor + - dynamic_type: int64_t + field_name: max_q + name: max_q + type: c10::SymInt + - dynamic_type: int64_t + field_name: max_k + name: max_k + type: c10::SymInt + - dynamic_type: at::Tensor + field_name: philox_seed + name: philox_seed + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_offset + name: philox_offset 
+ type: at::Tensor + - dynamic_type: at::Tensor + field_name: debug_attn_mask + name: debug_attn_mask type: at::Tensor inplace: false is_factory_method: false @@ -152810,13 +160946,18 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _triton_multi_head_attention - operator_name: _triton_multi_head_attention +- name: _scaled_dot_product_cudnn_attention_backward + operator_name: _scaled_dot_product_cudnn_attention_backward overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor + schema_string: aten::_scaled_dot_product_cudnn_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor attn_bias, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, *, float? scale=None) -> (Tensor, Tensor, Tensor) arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152833,43 +160974,74 @@ name: value type: const at::Tensor & - annotation: null - dynamic_type: int64_t + dynamic_type: at::Tensor is_nullable: false - name: embed_dim - type: int64_t + name: out + type: const at::Tensor & - annotation: null - dynamic_type: int64_t + dynamic_type: at::Tensor is_nullable: false - name: num_head - type: int64_t + name: logsumexp + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_weight + name: philox_seed type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_bias + name: philox_offset type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_weight + name: attn_bias type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_bias + name: cum_seq_q type: const at::Tensor & - annotation: null - default: '{}' dynamic_type: at::Tensor + is_nullable: false + name: cum_seq_k + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_q + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_k + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: is_causal + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double is_nullable: true - name: mask - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &) + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double, bool, ::std::optional) schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const 
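_scaled_dot_product_cudnn_attention is another new backend entry; its inputs match the efficient-attention forward plus return_debug_mask, and its nine returns mirror the flash forward. Its backward counterpart, _scaled_dot_product_cudnn_attention_backward, appears alongside it. A call-shape sketch (wrapper name ours; dispatching requires a cuDNN-enabled CUDA build):

    #include <ATen/ATen.h>

    auto cudnn_sdpa_fwd(const at::Tensor& q, const at::Tensor& k,
                        const at::Tensor& v) {
      // Nine-element tuple: output, logsumexp, cum_seq_q/k, max_q/k,
      // philox seed/offset, debug_attn_mask.
      return at::_scaled_dot_product_cudnn_attention(
          q, k, v,
          /*attn_bias=*/std::nullopt,
          /*compute_log_sumexp=*/true,
          /*dropout_p=*/0.0,
          /*is_causal=*/false,
          /*return_debug_mask=*/false,
          /*scale=*/std::nullopt);
    }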
at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152886,41 +161058,67 @@ name: value type: const at::Tensor & - annotation: null - dynamic_type: int64_t + dynamic_type: at::Tensor is_nullable: false - name: embed_dim - type: int64_t + name: out + type: const at::Tensor & - annotation: null - dynamic_type: int64_t + dynamic_type: at::Tensor is_nullable: false - name: num_head - type: int64_t + name: logsumexp + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_weight + name: philox_seed type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_bias + name: philox_offset type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_weight + name: attn_bias type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_bias + name: cum_seq_q type: const at::Tensor & - annotation: null - default: '{}' dynamic_type: at::Tensor + is_nullable: false + name: cum_seq_k + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_q + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_k + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: is_causal + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double is_nullable: true - name: mask - type: const c10::optional & + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -152928,7 +161126,13 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: result + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 type: at::Tensor inplace: false is_factory_method: false @@ -152937,315 +161141,409 @@ with_gil: false deprecated: false has_math_kernel: false -- name: special_airy_ai - operator_name: special_airy_ai +- name: _flash_attention_forward + operator_name: _flash_attention_forward overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::special_airy_ai(Tensor x) -> Tensor + schema_string: aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, bool return_debug_mask, *, float? scale=None, SymInt? window_size_left=None, SymInt? window_size_right=None, Tensor? seqused_k=None, Tensor? 
alibi_slopes=None) -> (Tensor output, Tensor softmax_logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask) arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: x + name: query type: const at::Tensor & - schema_order_cpp_signature: at::Tensor (const at::Tensor &) - schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: x + name: key type: const at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: special - returns: - - dynamic_type: at::Tensor - name: result - type: at::Tensor - inplace: false - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: special_airy_ai_out - operator_name: special_airy_ai - overload_name: out - manual_kernel_registration: false - category_override: '' - schema_string: aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) - arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: x + name: value type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) - schema_order_arguments: - annotation: null dynamic_type: at::Tensor - is_nullable: false - name: x - type: const at::Tensor & - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: special - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: _transformer_decoder_only_layer_fwd - operator_name: _transformer_decoder_only_layer_fwd - overload_name: '' - manual_kernel_registration: false - category_override: '' - schema_string: aten::_transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? 
incr_value=None) -> (Tensor, Tensor, Tensor) - arguments: + is_nullable: true + name: cum_seq_q + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor - is_nullable: false - name: src - type: const at::Tensor & + is_nullable: true + name: cum_seq_k + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false - name: embed_dim + name: max_q type: int64_t - annotation: null dynamic_type: int64_t is_nullable: false - name: num_heads + name: max_k type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: is_causal + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: return_debug_mask + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size_left + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size_right + type: ::std::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: seqused_k + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: alibi_slopes + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, double, bool, bool, ::std::optional, ::std::optional, ::std::optional, const ::std::optional &, const ::std::optional &) + schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_weight + name: query type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_bias + name: key type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_weight + name: value type: const at::Tensor & - annotation: null dynamic_type: at::Tensor + is_nullable: true + name: cum_seq_q + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cum_seq_k + type: const ::std::optional & + - annotation: null + dynamic_type: int64_t is_nullable: false - name: proj_bias - type: const at::Tensor & + name: max_q + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_k + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double - annotation: null dynamic_type: bool is_nullable: false - name: use_gelu + name: is_causal type: bool - annotation: null dynamic_type: bool is_nullable: false - name: norm_first + name: return_debug_mask type: bool - annotation: null + default: ::std::nullopt dynamic_type: double - is_nullable: false - name: eps - type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size_left + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size_right + type: ::std::optional - 
annotation: null + default: '{}' dynamic_type: at::Tensor - is_nullable: false - name: norm_weight_1 - type: const at::Tensor & + is_nullable: true + kwarg_only: true + name: seqused_k + type: const ::std::optional & - annotation: null + default: '{}' dynamic_type: at::Tensor - is_nullable: false - name: norm_bias_1 - type: const at::Tensor & + is_nullable: true + kwarg_only: true + name: alibi_slopes + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: output + name: output + type: at::Tensor + - dynamic_type: at::Tensor + field_name: softmax_logsumexp + name: softmax_logsumexp + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_seed + name: philox_seed + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_offset + name: philox_offset + type: at::Tensor + - dynamic_type: at::Tensor + field_name: debug_attn_mask + name: debug_attn_mask + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _flash_attention_backward + operator_name: _flash_attention_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None, SymInt? window_size_left=None, SymInt? window_size_right=None) -> (Tensor, Tensor, Tensor) + arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: norm_weight_2 + name: grad_out type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: norm_bias_2 + name: query type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_weight_1 + name: key type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_bias_1 + name: value type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_weight_2 + name: out type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_bias_2 + name: logsumexp type: const at::Tensor & - annotation: null - default: '{}' - dynamic_type: at::Tensor - is_nullable: true - name: mask - type: const c10::optional & - - annotation: null - default: '{}' - dynamic_type: at::Tensor - is_nullable: true - name: incr_key - type: const c10::optional & - - annotation: null - default: '{}' dynamic_type: at::Tensor - is_nullable: true - name: incr_value - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, double, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &) - schema_order_arguments: + is_nullable: false + name: cum_seq_q + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: src + name: cum_seq_k type: const at::Tensor & - annotation: null dynamic_type: int64_t is_nullable: false - name: embed_dim + name: max_q type: 
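_flash_attention_forward keeps its varlen interface (cum_seq_q/cum_seq_k plus max_q/max_k), but the cumulative-sequence tensors become nullable, the philox state moves to Tensors, and a set of kwarg-only options is added: scale, window_size_left/window_size_right, seqused_k, and alibi_slopes, presumably surfacing the flash-attention kernel's sliding-window and ALiBi features. A dense (non-varlen) call sketch (wrapper name ours):

    #include <ATen/ATen.h>

    auto flash_fwd_dense(const at::Tensor& q, const at::Tensor& k,
                         const at::Tensor& v, int64_t max_q, int64_t max_k) {
      // Five-element tuple: output, softmax_logsumexp, philox seed/offset,
      // debug_attn_mask.
      return at::_flash_attention_forward(
          q, k, v,
          /*cum_seq_q=*/std::nullopt,       // nullable in the new schema
          /*cum_seq_k=*/std::nullopt,
          max_q, max_k,
          /*dropout_p=*/0.0, /*is_causal=*/false,
          /*return_debug_mask=*/false,
          /*scale=*/std::nullopt,
          /*window_size_left=*/std::nullopt,   // new sliding-window kwargs
          /*window_size_right=*/std::nullopt,
          /*seqused_k=*/std::nullopt,          // new per-batch key lengths
          /*alibi_slopes=*/std::nullopt);      // new ALiBi support
    }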
int64_t - annotation: null dynamic_type: int64_t is_nullable: false - name: num_heads + name: max_k type: int64_t - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: qkv_weight - type: const at::Tensor & + name: dropout_p + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: qkv_bias - type: const at::Tensor & + name: is_causal + type: bool - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_weight + name: philox_seed type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_bias + name: philox_offset type: const at::Tensor & - annotation: null - dynamic_type: bool - is_nullable: false - name: use_gelu - type: bool + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional - annotation: null - dynamic_type: bool - is_nullable: false - name: norm_first - type: bool + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size_left + type: ::std::optional - annotation: null - dynamic_type: double + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size_right + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double, bool, const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor is_nullable: false - name: eps - type: double + name: grad_out + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: norm_weight_1 + name: query type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: norm_bias_1 + name: key type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: norm_weight_2 + name: value type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: norm_bias_2 + name: out type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_weight_1 + name: logsumexp type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_bias_1 + name: cum_seq_q type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_weight_2 + name: cum_seq_k type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_q + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_k + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: is_causal + type: bool - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_bias_2 + name: philox_seed type: const at::Tensor & - annotation: null - default: '{}' dynamic_type: at::Tensor + is_nullable: false + name: philox_offset + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: double is_nullable: true - name: mask - type: const c10::optional & + kwarg_only: true + name: scale + type: ::std::optional - annotation: null - default: '{}' - 
dynamic_type: at::Tensor + default: ::std::nullopt + dynamic_type: int64_t is_nullable: true - name: incr_key - type: const c10::optional & + kwarg_only: true + name: window_size_left + type: ::std::optional - annotation: null - default: '{}' - dynamic_type: at::Tensor + default: ::std::nullopt + dynamic_type: int64_t is_nullable: true - name: incr_value - type: const c10::optional & + kwarg_only: true + name: window_size_right + type: ::std::optional method_of: - Type - namespace @@ -153268,12 +161566,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _native_decoder_only_multi_head_attention - operator_name: _native_decoder_only_multi_head_attention +- name: _efficient_attention_forward + operator_name: _efficient_attention_forward overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_native_decoder_only_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True) -> (Tensor, Tensor, Tensor, Tensor) + schema_string: aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt? max_seqlen_q, SymInt? max_seqlen_k, float dropout_p, int custom_mask_type, bool compute_log_sumexp=False, *, float? scale=None, Tensor? seqlen_k=None, int? window_size=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, SymInt max_seqlen_batch_q, SymInt max_seqlen_batch_k) arguments: - annotation: null dynamic_type: at::Tensor @@ -153290,68 +161588,196 @@ is_nullable: false name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cu_seqlens_q + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cu_seqlens_k + type: const ::std::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: max_seqlen_q + type: ::std::optional - annotation: null dynamic_type: int64_t + is_nullable: true + name: max_seqlen_k + type: ::std::optional + - annotation: null + dynamic_type: double is_nullable: false - name: embed_dim - type: int64_t + name: dropout_p + type: double - annotation: null dynamic_type: int64_t is_nullable: false - name: num_head + name: custom_mask_type type: int64_t - annotation: null - dynamic_type: at::Tensor + default: false + dynamic_type: bool is_nullable: false - name: qkv_weight - type: const at::Tensor & + name: compute_log_sumexp + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: seqlen_k + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, ::std::optional, ::std::optional, double, int64_t, bool, ::std::optional, const ::std::optional &, 
::std::optional) + schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_bias + name: query type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_weight + name: key type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_bias + name: value type: const at::Tensor & - annotation: null - default: '{}' dynamic_type: at::Tensor is_nullable: true - name: mask - type: const c10::optional & + name: bias + type: const ::std::optional & - annotation: null - default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_key - type: const c10::optional & + name: cu_seqlens_q + type: const ::std::optional & - annotation: null - default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_value - type: const c10::optional & + name: cu_seqlens_k + type: const ::std::optional & - annotation: null - default: true - dynamic_type: bool + dynamic_type: int64_t + is_nullable: true + name: max_seqlen_q + type: ::std::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: max_seqlen_k + type: ::std::optional + - annotation: null + dynamic_type: double is_nullable: false - name: need_weights - type: bool + name: dropout_p + type: double - annotation: null - default: true + dynamic_type: int64_t + is_nullable: false + name: custom_mask_type + type: int64_t + - annotation: null + default: false dynamic_type: bool is_nullable: false - name: average_attn_weights + name: compute_log_sumexp type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, bool) - schema_order_arguments: + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: seqlen_k + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: output + name: output + type: at::Tensor + - dynamic_type: at::Tensor + field_name: logsumexp + name: logsumexp + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_seed + name: philox_seed + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_offset + name: philox_offset + type: at::Tensor + - dynamic_type: int64_t + field_name: max_seqlen_batch_q + name: max_seqlen_batch_q + type: c10::SymInt + - dynamic_type: int64_t + field_name: max_seqlen_batch_k + name: max_seqlen_batch_k + type: c10::SymInt + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _efficient_attention_backward + operator_name: _efficient_attention_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor out, Tensor? cu_seqlens_q, Tensor? 
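_efficient_attention_forward grows similarly: an optional bias, a max_seqlen_k, explicit dropout_p, an integer custom_mask_type, and kwarg-only scale, seqlen_k, and window_size; the returns add the philox state and per-batch max sequence lengths. A dense call sketch (wrapper name ours; custom_mask_type 0 is taken here to mean no causal mask, an assumption based on the xformers kernel this wraps):

    #include <ATen/ATen.h>

    auto efficient_fwd_dense(const at::Tensor& q, const at::Tensor& k,
                             const at::Tensor& v) {
      // Six-element tuple: output, logsumexp, philox seed/offset,
      // max_seqlen_batch_q, max_seqlen_batch_k.
      return at::_efficient_attention_forward(
          q, k, v,
          /*bias=*/std::nullopt,
          /*cu_seqlens_q=*/std::nullopt, /*cu_seqlens_k=*/std::nullopt,
          /*max_seqlen_q=*/std::nullopt, /*max_seqlen_k=*/std::nullopt,
          /*dropout_p=*/0.0,
          /*custom_mask_type=*/0,          // assumed: 0 == no custom mask
          /*compute_log_sumexp=*/true,
          /*scale=*/std::nullopt,
          /*seqlen_k=*/std::nullopt,
          /*window_size=*/std::nullopt);
    }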
cu_seqlens_k, SymInt max_seqlen_q, SymInt max_seqlen_k, Tensor logsumexp, float dropout_p, Tensor philox_seed, Tensor philox_offset, int custom_mask_type, bool bias_requires_grad, *, float? scale=None, int? num_splits_key=None, int? window_size=None, bool shared_storage_dqdkdv=False) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out_ + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -153367,65 +161793,203 @@ is_nullable: false name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cu_seqlens_q + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cu_seqlens_k + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false - name: embed_dim + name: max_seqlen_q type: int64_t - annotation: null dynamic_type: int64_t is_nullable: false - name: num_head + name: max_seqlen_k type: int64_t - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_weight + name: logsumexp type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_bias + name: philox_seed type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_weight + name: philox_offset + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: custom_mask_type + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: bias_requires_grad + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: num_splits_key + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: shared_storage_dqdkdv + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, const at::Tensor &, double, const at::Tensor &, const at::Tensor &, int64_t, bool, ::std::optional, ::std::optional, ::std::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out_ type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_bias + name: query + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: key + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value type: const at::Tensor & - annotation: null - default: '{}' dynamic_type: at::Tensor is_nullable: true - name: mask - type: const c10::optional & + 
name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: out + type: const at::Tensor & - annotation: null - default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_key - type: const c10::optional & + name: cu_seqlens_q + type: const ::std::optional & - annotation: null - default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_value - type: const c10::optional & + name: cu_seqlens_k + type: const ::std::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_seqlen_q + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_seqlen_k + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: logsumexp + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: philox_seed + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: philox_offset + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: custom_mask_type + type: int64_t - annotation: null - default: true dynamic_type: bool is_nullable: false - name: need_weights + name: bias_requires_grad type: bool - annotation: null - default: true + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: num_splits_key + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size + type: ::std::optional + - annotation: null + default: false dynamic_type: bool is_nullable: false - name: average_attn_weights + kwarg_only: true + name: shared_storage_dqdkdv type: bool method_of: - Type @@ -153452,6 +162016,349 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _triton_scaled_dot_attention + operator_name: _triton_scaled_dot_attention + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: q + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: k + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: v + type: const at::Tensor & + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: q + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: k + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: v + type: const at::Tensor & + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result 
+ type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fill_mem_eff_dropout_mask_ + operator_name: _fill_mem_eff_dropout_mask_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fill_mem_eff_dropout_mask_(Tensor(a!) self, float dropout_p, int seed, int offset) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: seed + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, int64_t, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: seed + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _triton_multi_head_attention + operator_name: _triton_multi_head_attention + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? 
mask=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: query + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: key + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: embed_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_head + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: qkv_weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: qkv_bias + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: proj_weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: proj_bias + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: mask + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: query + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: key + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: embed_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_head + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: qkv_weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: qkv_bias + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: proj_weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: proj_bias + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: mask + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_airy_ai + operator_name: special_airy_ai + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_airy_ai(Tensor x) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + 
with_gil: false + deprecated: false + has_math_kernel: false +- name: special_airy_ai_out + operator_name: special_airy_ai + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: special_bessel_j0 operator_name: special_bessel_j0 overload_name: '' @@ -153873,11 +162780,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_t operator_name: special_chebyshev_polynomial_t overload_name: n_scalar @@ -153918,11 +162825,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_t_out operator_name: special_chebyshev_polynomial_t overload_name: out @@ -154036,11 +162943,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_t_out operator_name: special_chebyshev_polynomial_t overload_name: n_scalar_out @@ -154185,11 +163092,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_u operator_name: special_chebyshev_polynomial_u overload_name: n_scalar @@ -154230,11 +163137,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_u_out operator_name: special_chebyshev_polynomial_u overload_name: out @@ -154348,11 +163255,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_u_out operator_name: special_chebyshev_polynomial_u overload_name: n_scalar_out @@ -154497,11 +163404,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_v operator_name: 
special_chebyshev_polynomial_v overload_name: n_scalar @@ -154542,11 +163449,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_v_out operator_name: special_chebyshev_polynomial_v overload_name: out @@ -154660,11 +163567,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_v_out operator_name: special_chebyshev_polynomial_v overload_name: n_scalar_out @@ -154809,11 +163716,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_w operator_name: special_chebyshev_polynomial_w overload_name: n_scalar @@ -154854,11 +163761,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_w_out operator_name: special_chebyshev_polynomial_w overload_name: out @@ -154972,11 +163879,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_w_out operator_name: special_chebyshev_polynomial_w overload_name: n_scalar_out @@ -155121,11 +164028,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_hermite_polynomial_h operator_name: special_hermite_polynomial_h overload_name: n_scalar @@ -155166,11 +164073,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_hermite_polynomial_h_out operator_name: special_hermite_polynomial_h overload_name: out @@ -155284,11 +164191,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_hermite_polynomial_h_out operator_name: special_hermite_polynomial_h overload_name: n_scalar_out @@ -155433,11 +164340,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_hermite_polynomial_he operator_name: special_hermite_polynomial_he overload_name: n_scalar @@ -155478,11 +164385,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_hermite_polynomial_he_out operator_name: special_hermite_polynomial_he overload_name: out @@ -155596,11 +164503,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - 
has_math_kernel: true + has_math_kernel: false - name: special_hermite_polynomial_he_out operator_name: special_hermite_polynomial_he overload_name: n_scalar_out @@ -155745,11 +164652,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_laguerre_polynomial_l operator_name: special_laguerre_polynomial_l overload_name: n_scalar @@ -155790,11 +164697,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_laguerre_polynomial_l_out operator_name: special_laguerre_polynomial_l overload_name: out @@ -155908,11 +164815,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_laguerre_polynomial_l_out operator_name: special_laguerre_polynomial_l overload_name: n_scalar_out @@ -156057,11 +164964,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_legendre_polynomial_p operator_name: special_legendre_polynomial_p overload_name: n_scalar @@ -156102,11 +165009,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_legendre_polynomial_p_out operator_name: special_legendre_polynomial_p overload_name: out @@ -156220,11 +165127,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_legendre_polynomial_p_out operator_name: special_legendre_polynomial_p overload_name: n_scalar_out @@ -156873,11 +165780,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_t operator_name: special_shifted_chebyshev_polynomial_t overload_name: n_scalar @@ -156918,11 +165825,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_t_out operator_name: special_shifted_chebyshev_polynomial_t overload_name: out @@ -157036,11 +165943,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_t_out operator_name: special_shifted_chebyshev_polynomial_t overload_name: n_scalar_out @@ -157185,11 +166092,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_u operator_name: special_shifted_chebyshev_polynomial_u overload_name: n_scalar @@ 
-157230,11 +166137,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_u_out operator_name: special_shifted_chebyshev_polynomial_u overload_name: out @@ -157348,11 +166255,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_u_out operator_name: special_shifted_chebyshev_polynomial_u overload_name: n_scalar_out @@ -157497,11 +166404,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_v operator_name: special_shifted_chebyshev_polynomial_v overload_name: n_scalar @@ -157542,11 +166449,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_v_out operator_name: special_shifted_chebyshev_polynomial_v overload_name: out @@ -157660,11 +166567,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_v_out operator_name: special_shifted_chebyshev_polynomial_v overload_name: n_scalar_out @@ -157809,11 +166716,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_w operator_name: special_shifted_chebyshev_polynomial_w overload_name: n_scalar @@ -157854,11 +166761,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_w_out operator_name: special_shifted_chebyshev_polynomial_w overload_name: out @@ -157972,11 +166879,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_w_out operator_name: special_shifted_chebyshev_polynomial_w overload_name: n_scalar_out @@ -158278,15 +167185,15 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional &, 
const ::std::optional &) schema_order_arguments: - annotation: a! dynamic_type: at::TensorList @@ -158366,14 +167273,208 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adam_ + operator_name: _fused_adam_ + overload_name: tensor_lr + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! 
+ dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -158472,15 +167573,15 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: a! dynamic_type: at::TensorList @@ -158560,14 +167661,14 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -158581,6 +167682,720 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _fused_adamw_ + operator_name: _fused_adamw_ + overload_name: tensor_lr + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adamw_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! 
+ dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! 
+ dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_sgd_ + operator_name: _fused_sgd_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! 
+ dynamic_type: at::TensorList + is_nullable: false + name: momentum_buffer_list + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: dampening + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: nesterov + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: is_first_step + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, bool, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: momentum_buffer_list + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: dampening + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: nesterov + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: is_first_step + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_sgd_ + operator_name: _fused_sgd_ + overload_name: tensor_lr + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: momentum_buffer_list + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: momentum + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: dampening + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: nesterov + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: is_first_step + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, double, double, const at::Tensor &, double, bool, bool, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! 
+ dynamic_type: at::TensorList + is_nullable: false + name: momentum_buffer_list + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: momentum + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: dampening + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: nesterov + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: is_first_step + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adagrad_ + operator_name: _fused_adagrad_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adagrad_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] state_sums, Tensor(d!)[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: state_sums + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! 
+ dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: state_sums + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _propagate_xla_data + operator_name: _propagate_xla_data + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_propagate_xla_data(Tensor input, Tensor output) -> () + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + schema_order_cpp_signature: void (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true - name: _new_zeros_with_same_feature_meta_out operator_name: _new_zeros_with_same_feature_meta overload_name: out @@ -158970,7 +168785,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight_buf - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -158980,7 +168795,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -159030,8 +168845,8 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const c10::optional &, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const ::std::optional &, const 
at::Tensor &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -159052,7 +168867,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight_buf - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -159062,7 +168877,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -159112,7 +168927,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! dynamic_type: at::Tensor @@ -159240,7 +169055,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -159250,17 +169065,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -159310,7 +169125,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -159321,7 +169136,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: void (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, const at::Tensor &, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &, at::TensorList) + schema_order_cpp_signature: void (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &, const at::Tensor &, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -159352,7 +169167,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -159362,17 +169177,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -159422,7 +169237,7 @@ dynamic_type: 
at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -159575,12 +169390,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, c10::optional, at::Tensor &, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, ::std::optional, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -159593,11 +169408,11 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -159735,8 +169550,8 @@ dynamic_type: bool is_nullable: true name: train - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, c10::optional, at::Tensor &, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, ::std::optional, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -159752,7 +169567,7 @@ dynamic_type: bool is_nullable: true name: train - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -160051,7 +169866,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -160115,6 +169930,65 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _test_functorch_fallback_out + operator_name: _test_functorch_fallback + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_test_functorch_fallback.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false - name: bartlett_window_out operator_name: bartlett_window overload_name: out @@ -160246,12 +170120,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -160277,7 +170151,7 @@ is_nullable: false name: output_zero_point type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, double, double, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double, double, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -160288,12 +170162,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -160367,13 +170241,13 @@ name: p type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -160386,12 +170260,12 @@ name: p type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -160433,13 +170307,13 @@ name: p type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -160452,12 +170326,12 @@ name: p type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -160500,13 +170374,13 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -160520,12 +170394,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -160578,20 +170452,20 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: pos_weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -160608,13 +170482,13 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: pos_weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -160668,14 +170542,14 @@ dynamic_type: at::Tensor is_nullable: true name: weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t is_nullable: false name: minlength type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -160687,7 +170561,7 @@ dynamic_type: at::Tensor is_nullable: true name: weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ 
-160950,7 +170824,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -160973,7 +170847,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -161004,7 +170878,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -161020,7 +170894,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -161079,7 +170953,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + schema_string: aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) arguments: - allocate: true annotation: a! @@ -161262,7 +171136,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
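NOTE (editorial): the convolution schema strings above move their `int[]`/`int` parameters to `SymInt[]`/`SymInt` (symbolic integers, used when tracing or compiling with dynamic shapes), yet the emitted `schema_order_cpp_signature` entries still take `at::IntArrayRef` and `int64_t`, so plain-integer call sites are unaffected. A sketch of such a call, assuming the functional `at::convolution` overload matches the schema shown:

    #include <ATen/ATen.h>

    at::Tensor conv_example() {
      at::Tensor input  = at::randn({1, 3, 8, 8});
      at::Tensor weight = at::randn({4, 3, 3, 3});
      // Plain integer lists still satisfy a SymInt[] schema: SymInt is
      // implicitly constructible from int64_t at the dispatcher boundary.
      return at::convolution(input, weight, /*bias=*/{}, /*stride=*/{1, 1},
                             /*padding=*/{1, 1}, /*dilation=*/{1, 1},
                             /*transposed=*/false, /*output_padding=*/{0, 0},
                             /*groups=*/1);
    }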
@@ -161285,7 +171159,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -161316,7 +171190,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -161332,7 +171206,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -161391,7 +171265,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + schema_string: aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) arguments: - allocate: true annotation: a! @@ -161564,7 +171438,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -161587,7 +171461,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -161638,7 +171512,7 @@ is_nullable: false name: allow_tf32 type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, bool, bool, bool, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, bool, bool, bool, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -161654,7 +171528,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -162089,12 +171963,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -162102,11 +171976,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -162357,17 +172231,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -162383,7 +172257,7 @@ is_nullable: false name: epsilon type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -162399,17 +172273,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -162525,22 +172399,22 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: 
at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -162551,7 +172425,7 @@ is_nullable: false name: reserveSpace type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, double, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, double, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -162572,22 +172446,22 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -162641,141 +172515,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: cudnn_convolution_out - operator_name: cudnn_convolution - overload_name: out - manual_kernel_registration: false - category_override: '' - schema_string: aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) - arguments: - - allocate: true - annotation: a! 
- dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: weight - type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: padding - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: stride - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: dilation - type: at::IntArrayRef - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: groups - type: int64_t - - annotation: null - dynamic_type: bool - is_nullable: false - name: benchmark - type: bool - - annotation: null - dynamic_type: bool - is_nullable: false - name: deterministic - type: bool - - annotation: null - dynamic_type: bool - is_nullable: false - name: allow_tf32 - type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool, at::Tensor &) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: weight - type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: padding - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: stride - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: dilation - type: at::IntArrayRef - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: groups - type: int64_t - - annotation: null - dynamic_type: bool - is_nullable: false - name: benchmark - type: bool - - annotation: null - dynamic_type: bool - is_nullable: false - name: deterministic - type: bool - - annotation: null - dynamic_type: bool - is_nullable: false - name: allow_tf32 - type: bool - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false - is_factory_method: false - abstract: true - device_guard: false - with_gil: false - deprecated: false - has_math_kernel: false - name: cudnn_convolution_transpose_out operator_name: cudnn_convolution_transpose overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
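NOTE (editorial): the entire generated record for `aten::cudnn_convolution.out` is deleted at this position. Whether the overload was dropped upstream or merely relocated when the file was regenerated is not visible in this hunk, so downstream bindings should be checked against the full new file rather than assumed gone. The deleted record documents the argument order; a sketch of the functional form, under the assumption that it keeps the same order (CUDA build required):

    #include <ATen/ATen.h>

    // Argument order taken from the record removed above:
    // self, weight, padding, stride, dilation, groups,
    // benchmark, deterministic, allow_tf32.
    at::Tensor cudnn_conv(const at::Tensor& x, const at::Tensor& w) {
      return at::cudnn_convolution(x, w, /*padding=*/{1, 1}, /*stride=*/{1, 1},
                                   /*dilation=*/{1, 1}, /*groups=*/1,
                                   /*benchmark=*/false, /*deterministic=*/false,
                                   /*allow_tf32=*/true);
    }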
@@ -162914,7 +172659,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -163023,7 +172768,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + schema_string: aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) arguments: - allocate: true annotation: a! @@ -163169,7 +172914,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -163192,7 +172937,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -163213,7 +172958,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -163229,7 +172974,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -163278,7 +173023,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -163306,12 +173051,12 @@ dynamic_type: const at::Scalar & is_nullable: true name: alpha - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -163332,7 +173077,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -163353,12 +173098,12 @@ dynamic_type: const at::Scalar & is_nullable: true name: alpha - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -164180,8 +173925,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -164198,7 +173943,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -164623,7 +174368,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -164636,7 +174381,7 @@ is_nullable: false name: padding_idx type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool, int64_t, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const ::std::optional &, bool, int64_t, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -164676,7 +174421,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -164815,7 +174560,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -164828,7 +174573,7 @@ is_nullable: false name: padding_idx type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool, int64_t, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const ::std::optional &, bool, int64_t, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -164868,7 +174613,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -164992,14 +174737,14 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: -1 dynamic_type: int64_t is_nullable: false name: padding_idx type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, const c10::optional &, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, const ::std::optional &, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -165045,7 +174790,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: -1 dynamic_type: int64_t @@ -165210,15 +174955,15 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, ::std::optional, at::Tensor &) 
schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -165230,14 +174975,14 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -165261,12 +175006,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: new_empty_out - operator_name: new_empty +- name: empty_permuted_out + operator_name: empty_permuted overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -165276,26 +175021,26 @@ output: true type: at::Tensor & - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::IntArrayRef is_nullable: false - name: self - type: const at::Tensor & + name: size + type: at::IntArrayRef - annotation: null dynamic_type: at::IntArrayRef is_nullable: false - name: size + name: physical_layout type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::IntArrayRef is_nullable: false - name: self - type: const at::Tensor & + name: size + type: at::IntArrayRef - annotation: null dynamic_type: at::IntArrayRef is_nullable: false - name: size + name: physical_layout type: at::IntArrayRef - allocate: true annotation: a! @@ -165320,12 +175065,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: new_empty_strided_out - operator_name: new_empty_strided +- name: new_empty_out + operator_name: new_empty overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -165344,12 +175089,7 @@ is_nullable: false name: size type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: stride - type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -165361,11 +175101,6 @@ is_nullable: false name: size type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: stride - type: at::IntArrayRef - allocate: true annotation: a! dynamic_type: at::Tensor @@ -165389,12 +175124,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: new_full_out - operator_name: new_full +- name: new_empty_strided_out + operator_name: new_empty_strided overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) 
+ schema_string: aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -165414,11 +175149,11 @@ name: size type: at::IntArrayRef - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::IntArrayRef is_nullable: false - name: fill_value - type: const at::Scalar & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, const at::Scalar &, at::Tensor &) + name: stride + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -165431,10 +175166,10 @@ name: size type: at::IntArrayRef - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::IntArrayRef is_nullable: false - name: fill_value - type: const at::Scalar & + name: stride + type: at::IntArrayRef - allocate: true annotation: a! dynamic_type: at::Tensor @@ -165458,12 +175193,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: new_zeros_out - operator_name: new_zeros +- name: new_full_out + operator_name: new_full overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -165482,7 +175217,76 @@ is_nullable: false name: size type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: new_zeros_out + operator_name: new_zeros + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
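NOTE (editorial): the apparent rename chain in the hunks above (new_empty → empty_permuted, new_empty_strided → new_empty, new_full → new_empty_strided, new_zeros → new_full) is an artifact of one genuinely new record, `aten::empty_permuted.out`, being inserted into the regenerated list; each following record shifts down by one. The new op allocates with a caller-chosen physical dimension order; a hedged sketch, assuming the generated `at::empty_permuted` factory:

    #include <ATen/ATen.h>

    // Logical size {1, 3, 8, 8} (NCHW) stored physically in NHWC order.
    at::Tensor nhwc_buffer() {
      return at::empty_permuted({1, 3, 8, 8}, /*physical_layout=*/{0, 2, 3, 1});
    }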
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -165581,7 +175385,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_empty_affine_quantized.out(int[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -165610,13 +175414,13 @@ name: zero_point type: int64_t - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, double, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, double, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -165638,12 +175442,12 @@ name: zero_point type: int64_t - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -165672,7 +175476,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_empty_per_channel_affine_quantized.out(int[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -165705,13 +175509,13 @@ name: axis type: int64_t - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, const at::Tensor &, const at::Tensor &, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, const at::Tensor &, const at::Tensor &, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -165737,12 +175541,12 @@ name: axis type: int64_t - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -165791,13 +175595,13 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, const at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -165810,12 +175614,12 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -165857,13 +175661,13 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -165876,12 +175680,12 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -165903,7 +175707,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_resize_output.out(Tensor self, int[] size, Device device, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -165972,7 +175776,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_resize_output(Tensor self, int[] size, Device device) -> Tensor + schema_string: aten::_resize_output(Tensor self, SymInt[] size, Device device) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -166047,13 +175851,13 @@ name: qtensor type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -166066,12 +175870,12 @@ name: qtensor type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -166115,13 +175919,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -166129,12 +175933,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -166335,6 +176139,65 @@ with_gil: false deprecated: false has_math_kernel: false +- name: floor_divide_out + operator_name: floor_divide + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false - name: full_out operator_name: full overload_name: names_out @@ -166364,8 +176227,8 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, const at::Scalar &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, const at::Scalar &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -166382,7 +176245,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
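NOTE (editorial): `aten::floor_divide.Scalar_out` above is a genuinely new out-variant: Tensor-by-Scalar division with the result floored, written into a preallocated `out`. The long-standing functional form computes the same thing; a small sketch:

    #include <ATen/ATen.h>

    at::Tensor floor_div_example() {
      at::Tensor t = at::full({2}, 7);   // [7, 7]
      return at::floor_divide(t, 2);     // [3, 3]
    }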
dynamic_type: at::Tensor @@ -166431,13 +176294,13 @@ name: fill_value type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -166450,12 +176313,12 @@ name: fill_value type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -166499,18 +176362,18 @@ name: filename type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: shared - type: c10::optional + type: ::std::optional - annotation: null default: 0 dynamic_type: int64_t is_nullable: true name: size - type: c10::optional - schema_order_cpp_signature: at::Tensor & (c10::string_view, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (c10::string_view, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: c10::string_view @@ -166518,17 +176381,17 @@ name: filename type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: shared - type: c10::optional + type: ::std::optional - annotation: null default: 0 dynamic_type: int64_t is_nullable: true name: size - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
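NOTE (editorial): the index_put hunks below change the index list's element type spelling to `c10::List<::std::optional<at::Tensor>>`: one slot per indexed dimension, where an empty optional plays the role of Python's `:`. A sketch of building such a list (the two optional spellings are interchangeable under the alias noted earlier):

    #include <ATen/ATen.h>
    #include <ATen/core/List.h>

    at::Tensor put_example() {
      at::Tensor self = at::zeros({4, 3});
      c10::List<std::optional<at::Tensor>> indices;
      indices.push_back(std::optional<at::Tensor>(at::arange(2, 3))); // row 2
      indices.push_back(std::nullopt);                                // all cols
      // Equivalent of self[2, :] = 1 in Python indexing.
      return at::index_put(self, indices, at::ones({3}), /*accumulate=*/false);
    }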
dynamic_type: at::Tensor @@ -167649,12 +177512,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -167680,7 +177543,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -167691,12 +177554,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -167817,7 +177680,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -167843,7 +177706,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -167869,7 +177732,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -167958,10 +177821,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -167973,7 +177836,7 @@ is_nullable: false name: accumulate type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::List> &, const at::Tensor &, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -167981,10 +177844,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -168039,10 +177902,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - 
type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -168060,7 +177923,7 @@ is_nullable: false name: unsafe type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::List> &, const at::Tensor &, bool, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &, bool, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -168068,10 +177931,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -168125,10 +177988,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -168146,7 +178009,7 @@ is_nullable: false name: unsafe type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List> &, const at::Tensor &, bool, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -168154,10 +178017,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -168282,18 +178145,18 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, const c10::optional &, const c10::optional &, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, const ::std::optional &, const ::std::optional &, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -168309,12 +178172,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -168420,18 +178283,18 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: ::std::array is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const 
at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -168462,12 +178325,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: ::std::array is_nullable: false @@ -168658,8 +178521,8 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -168676,7 +178539,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! dynamic_type: at::Tensor @@ -170083,12 +179946,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: median_out - operator_name: median +- name: quantized_max_pool3d_out + operator_name: quantized_max_pool3d overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -170102,13 +179965,79 @@ is_nullable: false name: self type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool - allocate: true annotation: a! dynamic_type: at::Tensor @@ -170132,12 +180061,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: nanmedian_out - operator_name: nanmedian +- name: median_out + operator_name: median overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -170181,12 +180110,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _mps_convolution_out - operator_name: _mps_convolution +- name: nanmedian_out + operator_name: nanmedian overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
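NOTE (editorial): as with empty_permuted earlier, the median → quantized_max_pool3d and nanmedian → median "renames" here are one new record, `aten::quantized_max_pool3d.out`, pushing the subsequent records down by one. The functional op itself predates this file; a sketch, assuming a per-tensor-quantized input:

    #include <ATen/ATen.h>

    // qx must be a quantized tensor (e.g. from at::quantize_per_tensor);
    // kernel_size/stride/padding/dilation are 3-element, per the record above.
    at::Tensor qpool(const at::Tensor& qx) {
      return at::quantized_max_pool3d(qx, /*kernel_size=*/{2, 2, 2},
                                      /*stride=*/{2, 2, 2},
                                      /*padding=*/{0, 0, 0},
                                      /*dilation=*/{1, 1, 1},
                                      /*ceil_mode=*/false);
    }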
@@ -170200,73 +180129,122 @@ is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: weight - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: bias - type: const c10::optional & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: padding - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: stride - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: dilation - type: at::IntArrayRef - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: groups - type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: weight - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: bias - type: const c10::optional & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: padding - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: stride - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: dilation - type: at::IntArrayRef - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: groups - type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _mps_convolution_out + operator_name: _mps_convolution + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t - allocate: true annotation: a! dynamic_type: at::Tensor @@ -170295,7 +180273,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + schema_string: aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) arguments: - allocate: true annotation: a! @@ -170448,7 +180426,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -170471,7 +180449,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -170492,7 +180470,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -170508,7 +180486,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -170912,17 +180890,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -170973,7 +180951,7 @@ is_nullable: false name: workspace type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, int64_t, int64_t, int64_t, bool, bool, bool, at::IntArrayRef, bool, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, int64_t, int64_t, int64_t, bool, bool, bool, at::IntArrayRef, bool, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -171029,17 +181007,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -171215,17 +181193,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ 
-171241,7 +181219,7 @@ is_nullable: false name: epsilon type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -171257,17 +181235,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -171373,28 +181351,28 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false name: epsilon type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -171415,22 +181393,22 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -171484,7 +181462,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -171507,7 +181485,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -171538,7 +181516,7 @@ is_nullable: false name: deterministic type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -171554,7 +181532,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -171613,7 +181591,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -171636,7 +181614,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -171672,7 +181650,7 @@ is_nullable: false name: deterministic type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -171688,7 +181666,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -171752,7 +181730,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -171775,7 +181753,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -171806,7 +181784,7 @@ is_nullable: false name: deterministic type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -171822,7 +181800,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -171942,7 +181920,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -171987,8 +181965,8 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const ::std::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -172014,7 +181992,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -172059,7 +182037,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -172187,7 +182165,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -172197,17 +182175,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -172252,7 +182230,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -172263,7 +182241,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: void (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, const at::Tensor &, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &, at::TensorList) + schema_order_cpp_signature: void (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &, const at::Tensor &, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -172294,7 +182272,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -172304,17 +182282,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -172359,7 +182337,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -172545,12 +182523,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -172576,7 +182554,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, bool, double, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const 
at::Tensor &, bool, double, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -172587,12 +182565,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -172648,12 +182626,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: batch_norm_stats_out - operator_name: batch_norm_stats +- name: _native_batch_norm_legit_no_training_out + operator_name: _native_batch_norm_legit_no_training overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + schema_string: aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) arguments: - allocate: true annotation: a! @@ -172669,23 +182647,80 @@ name: out1 output: true type: at::Tensor & + - allocate: true + annotation: c! + dynamic_type: at::Tensor + is_nullable: false + name: out2 + output: true + type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false name: input type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double - annotation: null dynamic_type: double is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: input type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double - annotation: null dynamic_type: double is_nullable: false @@ -172705,6 +182740,13 @@ name: out1 output: true type: at::Tensor & + - allocate: true + annotation: c! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out2 + output: true + type: at::Tensor & method_of: - Type - namespace @@ -172717,6 +182759,9 @@ - dynamic_type: at::Tensor name: out1 type: at::Tensor & + - dynamic_type: at::Tensor + name: out2 + type: at::Tensor & inplace: false is_factory_method: false abstract: true @@ -172724,12 +182769,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: batch_norm_gather_stats_out - operator_name: batch_norm_gather_stats +- name: batch_norm_stats_out + operator_name: batch_norm_stats overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + schema_string: aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) arguments: - allocate: true annotation: a! @@ -172751,42 +182796,76 @@ name: input type: const at::Tensor & - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: mean - type: const at::Tensor & + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, at::Tensor &, at::Tensor &) + schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: invstd + name: input type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: running_mean - type: const c10::optional & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: running_var - type: const c10::optional & - - annotation: null - dynamic_type: double - is_nullable: false - name: momentum - type: double - annotation: null dynamic_type: double is_nullable: false name: eps type: double - - annotation: null - dynamic_type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor is_nullable: false - name: count - type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, double, double, int64_t, at::Tensor &, at::Tensor &) - schema_order_arguments: + name: out0 + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: out1 + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out0 + type: at::Tensor & + - dynamic_type: at::Tensor + name: out1 + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_gather_stats_out + operator_name: batch_norm_gather_stats + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out0 + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out1 + output: true + type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -172806,12 +182885,54 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: count + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, double, double, int64_t, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: invstd + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -172900,12 +183021,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -172921,7 +183042,7 @@ is_nullable: false name: counts type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, double, double, const at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, double, double, const at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -172942,12 +183063,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -173038,27 +183159,27 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_invstd - type: 
const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -173074,7 +183195,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -173090,27 +183211,27 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_invstd - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -173228,7 +183349,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -173244,7 +183365,7 @@ is_nullable: false name: bias_g type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, bool, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, bool, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -173270,7 +183391,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -173344,7 +183465,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -173377,23 +183498,23 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy + name: sum_dy type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy_xmu + name: sum_dy_xmu type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false name: count type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -173419,16 +183540,16 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy + name: sum_dy type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy_xmu + name: sum_dy_xmu type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -173488,18 +183609,18 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false name: momentum type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, double, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, double, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -173510,12 +183631,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -173559,7 +183680,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -173582,7 +183703,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -173596,7 +183717,7 @@ name: stride size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -173612,7 +183733,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -173673,8 +183794,8 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -173686,7 +183807,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -173730,13 +183851,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -173744,12 +183865,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -173865,8 +183986,8 @@ dynamic_type: int64_t is_nullable: true name: compute_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -173887,7 +184008,7 @@ dynamic_type: int64_t is_nullable: true name: compute_mode - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174263,7 +184384,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -174337,12 +184458,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -174350,11 +184471,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174451,8 +184572,8 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -174464,7 +184585,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174512,14 +184633,14 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -174531,13 +184652,13 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174581,13 +184702,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -174595,12 +184716,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174629,7 +184750,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -174649,13 +184770,13 @@ name: high type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -174668,12 +184789,12 @@ name: high type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174702,7 +184823,7 @@ overload_name: low_dtype_out manual_kernel_registration: false category_override: '' - schema_string: aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -174727,13 +184848,13 @@ name: high type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -174751,12 +184872,12 @@ name: high type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174804,8 +184925,8 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -174817,7 +184938,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -174865,14 +184986,14 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -174884,13 +185005,13 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174934,13 +185055,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -174948,12 +185069,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -175041,7 +185162,7 @@ overload_name: Tensor_out manual_kernel_registration: false category_override: '' - schema_string: aten::repeat_interleave.Tensor_out(Tensor repeats, *, int? output_size=None, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -175056,13 +185177,13 @@ name: repeats type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -175070,12 +185191,12 @@ name: repeats type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -175477,24 +185598,24 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: start - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: end - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t is_nullable: false name: step type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, c10::optional, c10::optional, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, ::std::optional, ::std::optional, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -175513,17 +185634,17 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: start - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: end - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -175762,12 +185883,12 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -175790,11 +185911,11 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -175974,13 +186095,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -175988,12 +186109,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -176022,7 +186143,7 @@ overload_name: correction_out manual_kernel_registration: false category_override: '' - schema_string: aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + schema_string: aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) arguments: - allocate: true annotation: a! 
@@ -176044,19 +186165,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -176064,7 +186185,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -176072,19 +186193,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -176145,13 +186266,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -176159,12 +186280,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -176321,7 +186442,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -176807,6 +186928,55 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _nested_tensor_storage_offsets_out + operator_name: _nested_tensor_storage_offsets + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_tensor_storage_offsets.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false - name: _nested_from_padded_and_nested_example_out operator_name: _nested_from_padded_and_nested_example overload_name: out @@ -176871,7 +187041,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -176896,11 +187066,11 @@ name: nested_strides type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false name: offsets - type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Tensor &) + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -176918,131 +187088,297 @@ name: nested_strides type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false name: offsets + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_view_from_jagged_copy_out + operator_name: _nested_view_from_jagged_copy + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_view_from_jagged_copy.out(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dummy + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: lengths + type: const ::std::optional & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: ragged_idx + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min_seqlen + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max_seqlen + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, const ::std::optional &, const ::std::optional &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dummy + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: lengths + type: const ::std::optional & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: ragged_idx + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min_seqlen + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max_seqlen + type: const ::std::optional & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_values_copy_out + operator_name: _nested_get_values_copy + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _trilinear_out + operator_name: _trilinear + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i3 + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sumdim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: unroll_dim + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i3 + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand3 type: at::IntArrayRef - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false - is_factory_method: false - abstract: true - device_guard: false - with_gil: false - deprecated: false - has_math_kernel: false -- name: _trilinear_out - operator_name: _trilinear - overload_name: out - manual_kernel_registration: false - category_override: '' - schema_string: aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!) - arguments: - - allocate: true - annotation: a! 
- dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: i1 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: i2 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: i3 - type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: expand1 - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: expand2 - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: expand3 - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: sumdim - type: at::IntArrayRef - - annotation: null - default: 1 - dynamic_type: int64_t - is_nullable: false - name: unroll_dim - type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: i1 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: i2 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: i3 - type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: expand1 - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: expand2 - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: expand3 - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: sumdim - type: at::IntArrayRef - - annotation: null - default: 1 - dynamic_type: int64_t - is_nullable: false - name: unroll_dim - type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sumdim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: unroll_dim + type: int64_t - allocate: true annotation: a! dynamic_type: at::Tensor @@ -177331,12 +187667,12 @@ name: return_counts type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, c10::optional, at::Tensor &, at::Tensor &, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, ::std::optional, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -177356,11 +187692,11 @@ name: return_counts type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -177704,7 +188040,7 @@ overload_name: correction_out manual_kernel_registration: false category_override: '' - schema_string: aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) + schema_string: aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) arguments: - allocate: true annotation: a! @@ -177726,19 +188062,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional<int64_t> + type: const ::std::optional<at::Scalar> & - annotation: null default: false dynamic_type: bool @@ -177746,7 +188082,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::OptionalIntArrayRef, c10::optional<int64_t>, bool, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional<at::Scalar> &, bool, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -177754,19 +188090,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional<int64_t> + type: const ::std::optional<at::Scalar> & - annotation: null default: false dynamic_type: bool @@ -178025,8 +188361,8 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional<at::DimnameList> - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional<at::DimnameList>, at::Tensor &) + type: ::std::optional<at::DimnameList> + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional<at::DimnameList>, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -178038,7 +188374,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional<at::DimnameList> + type: ::std::optional<at::DimnameList> - allocate: true annotation: a! dynamic_type: at::Tensor @@ -178067,7 +188403,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_efficientzerotensor.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -178131,13 +188467,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional<at::MemoryFormat> - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional<at::MemoryFormat>, at::Tensor &) + type: ::std::optional<at::MemoryFormat> + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional<at::MemoryFormat>, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178145,12 +188481,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional<at::MemoryFormat> + type: ::std::optional<at::MemoryFormat> - allocate: true annotation: a!
dynamic_type: at::Tensor @@ -178253,12 +188589,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional<at::Generator> - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional<at::Generator>, at::Tensor &) + type: ::std::optional<at::Generator> + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional<at::Generator>, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178266,11 +188602,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional<at::Generator> + type: ::std::optional<at::Generator> - allocate: true annotation: a! dynamic_type: at::Tensor @@ -178383,12 +188719,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional<at::Generator> - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional<at::Generator>, at::Tensor &) + type: ::std::optional<at::Generator> + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional<at::Generator>, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178396,11 +188732,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional<at::Generator> + type: ::std::optional<at::Generator> - allocate: true annotation: a! dynamic_type: at::Tensor @@ -178444,12 +188780,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional<at::Generator> - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional<at::Generator>, at::Tensor &) + type: ::std::optional<at::Generator> + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional<at::Generator>, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178457,11 +188793,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional<at::Generator> + type: ::std::optional<at::Generator> - allocate: true annotation: a! dynamic_type: at::Tensor @@ -178510,12 +188846,12 @@ name: prob type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional<at::Generator> - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional<at::Generator>, at::Tensor &) + type: ::std::optional<at::Generator> + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional<at::Generator>, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178528,11 +188864,11 @@ name: prob type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional<at::Generator> + type: ::std::optional<at::Generator> - allocate: true annotation: a!
dynamic_type: at::Tensor @@ -178640,7 +188976,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -178656,8 +188992,8 @@ dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::IntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178668,7 +189004,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -178684,7 +189020,7 @@ dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -178708,51 +189044,85 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _sparse_sum_out - operator_name: _sparse_sum - overload_name: dim_out +- name: _batch_norm_with_update_functional + operator_name: _batch_norm_with_update_functional + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_batch_norm_with_update_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out) arguments: - - allocate: true - annotation: a! 
+ - annotation: null dynamic_type: at::Tensor is_nullable: false - name: out - output: true - type: at::Tensor & + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional<at::Tensor> & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self + name: running_mean type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false - name: dim - size: 1 - type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) + name: running_var + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const at::Tensor &, const at::Tensor &, double, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self + name: input type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor is_nullable: false - name: dim - size: 1 - type: at::IntArrayRef - - allocate: true - annotation: a! + name: running_mean + type: const at::Tensor & + - annotation: null dynamic_type: at::Tensor is_nullable: false - name: out - output: true - type: at::Tensor & + name: running_var + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double method_of: - Type - namespace @@ -178760,8 +189130,25 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: out - type: at::Tensor & + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + - dynamic_type: at::Tensor + field_name: running_mean_out + name: running_mean_out + type: at::Tensor + - dynamic_type: at::Tensor + field_name: running_var_out + name: running_var_out + type: at::Tensor inplace: false is_factory_method: false abstract: true @@ -178769,57 +189156,139 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _sparse_sum_backward_out - operator_name: _sparse_sum_backward +- name: _batch_norm_no_update_out + operator_name: _batch_norm_no_update overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_batch_norm_no_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) arguments: - allocate: true annotation: a!
dynamic_type: at::Tensor is_nullable: false - name: out + name: out0 + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: out1 + output: true + type: at::Tensor & + - allocate: true + annotation: c! + dynamic_type: at::Tensor + is_nullable: false + name: out2 + output: true + type: at::Tensor & + - allocate: true + annotation: d! + dynamic_type: at::Tensor + is_nullable: false + name: out3 output: true type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: grad + name: input type: const at::Tensor & - annotation: null dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const ::std::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const ::std::optional<at::Tensor> & + - annotation: null + dynamic_type: double is_nullable: false - name: self - type: const at::Tensor & + name: momentum + type: double - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: double is_nullable: false - name: dim - type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Tensor &) + name: eps + type: double + schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, double, double, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: grad + name: input type: const at::Tensor & - annotation: null dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const ::std::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const ::std::optional<at::Tensor> & + - annotation: null + dynamic_type: double is_nullable: false - name: self - type: const at::Tensor & + name: momentum + type: double - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: double is_nullable: false - name: dim - type: at::IntArrayRef + name: eps + type: double - allocate: true annotation: a! dynamic_type: at::Tensor is_nullable: false - name: out + name: out0 + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: out1 + output: true + type: at::Tensor & + - allocate: true + annotation: c! + dynamic_type: at::Tensor + is_nullable: false + name: out2 + output: true + type: at::Tensor & + - allocate: true + annotation: d!
+ dynamic_type: at::Tensor + is_nullable: false + name: out3 output: true type: at::Tensor & method_of: @@ -178829,7 +189298,16 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: out + name: out0 + type: at::Tensor & + - dynamic_type: at::Tensor + name: out1 + type: at::Tensor & + - dynamic_type: at::Tensor + name: out2 + type: at::Tensor & + - dynamic_type: at::Tensor + name: out3 type: at::Tensor & inplace: false is_factory_method: false @@ -178838,12 +189316,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _sparse_csr_sum_out - operator_name: _sparse_csr_sum - overload_name: dim_dtype_out +- name: _sparse_sum_out + operator_name: _sparse_sum + overload_name: dim_out manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -178863,20 +189341,7 @@ name: dim size: 1 type: at::IntArrayRef - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: keepdim - type: bool - - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178889,19 +189354,162 @@ name: dim size: 1 type: at::IntArrayRef - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: keepdim - type: bool - - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_sum_backward_out + operator_name: _sparse_sum_backward + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_csr_sum_out + operator_name: _sparse_csr_sum + overload_name: dim_dtype_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -178957,13 +189565,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178983,12 +189591,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -179338,12 +189946,12 @@ name: shape type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true name: layout - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -179361,11 +189969,11 @@ name: shape type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true name: layout - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -179412,14 +190020,14 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::ScalarType is_nullable: false kwarg_only: true name: dtype type: at::ScalarType - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::ScalarType, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::ScalarType, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -179430,7 +190038,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::ScalarType is_nullable: false @@ -179541,13 +190149,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -179555,12 +190163,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -179609,13 +190217,13 @@ name: the_template type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, const at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -179628,12 +190236,12 @@ name: the_template type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -179675,13 +190283,13 @@ name: the_template type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -179694,12 +190302,12 @@ name: the_template type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -180339,7 +190947,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -180373,7 +190981,14 @@ is_nullable: false name: values type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (int64_t, int64_t, at::IntArrayRef, const at::Tensor &, const at::Tensor &, at::Tensor &) + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (int64_t, int64_t, at::IntArrayRef, const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -180400,6 +191015,13 @@ is_nullable: false name: values type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -180770,12 +191392,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _to_dense_out - operator_name: _to_dense +- name: _sparse_mask_projection_out + operator_name: _sparse_mask_projection overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_to_dense.out(Tensor self, ScalarType? 
dtype=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -180789,132 +191411,215 @@ is_nullable: false name: self type: const at::Tensor & - - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) - schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self + name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - name: dtype - type: c10::optional - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false - is_factory_method: false - abstract: true - device_guard: false - with_gil: false - deprecated: false - has_math_kernel: false -- name: _coalesce_out - operator_name: _coalesce - overload_name: out - manual_kernel_registration: false - category_override: '' - schema_string: aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor + default: false + dynamic_type: bool is_nullable: false - name: self - type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + name: accumulate_matches + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false - is_factory_method: false - abstract: true - device_guard: false - with_gil: false - deprecated: false - has_math_kernel: false -- name: _coalesced_out - operator_name: _coalesced - overload_name: out - manual_kernel_registration: false - category_override: '' - schema_string: aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!) - arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - dynamic_type: bool - is_nullable: false - name: coalesced - type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, at::Tensor &) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self + name: mask type: const at::Tensor & - annotation: null + default: false dynamic_type: bool is_nullable: false - name: coalesced + name: accumulate_matches + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _to_dense_out + operator_name: _to_dense + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_dense.out(Tensor self, ScalarType? dtype=None, bool? masked_grad=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: ::std::optional<at::ScalarType> + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: masked_grad + type: ::std::optional<bool> + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional<at::ScalarType>, ::std::optional<bool>, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: ::std::optional<at::ScalarType> + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: masked_grad + type: ::std::optional<bool> + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _coalesce_out + operator_name: _coalesce + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _coalesced_out + operator_name: _coalesced + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: coalesced + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: coalesced type: bool - allocate: true annotation: a! @@ -181112,12 +191817,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: to_sparse_out - operator_name: to_sparse +- name: _to_sparse_out + operator_name: _to_sparse overload_name: sparse_dim_out manual_kernel_registration: false category_override: '' - schema_string: aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -181171,12 +191876,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: to_sparse_out - operator_name: to_sparse +- name: _to_sparse_out + operator_name: _to_sparse overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -181191,14 +191896,14 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true kwarg_only: true @@ -181206,13 +191911,13 @@ size: 2 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::OptionalIntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::OptionalIntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -181220,14 +191925,14 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true kwarg_only: true @@ -181235,12 +191940,12 @@ size: 2 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dense_dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -181264,12 +191969,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: to_sparse_csr_out - operator_name: to_sparse_csr +- name: _to_sparse_csr_out + operator_name: _to_sparse_csr overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -181284,12 +191989,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional<int64_t> - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional<int64_t>, at::Tensor &) + type: ::std::optional<int64_t> + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional<int64_t>, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -181297,11 +192002,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional<int64_t> + type: ::std::optional<int64_t> - allocate: true annotation: a! dynamic_type: at::Tensor @@ -181325,12 +192030,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: to_sparse_csc_out - operator_name: to_sparse_csc +- name: _to_sparse_csc_out + operator_name: _to_sparse_csc overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -181345,12 +192050,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional<int64_t> - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional<int64_t>, at::Tensor &) + type: ::std::optional<int64_t> + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional<int64_t>, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -181358,11 +192063,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional<int64_t> + type: ::std::optional<int64_t> - allocate: true annotation: a! dynamic_type: at::Tensor @@ -181386,12 +192091,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: to_sparse_bsr_out - operator_name: to_sparse_bsr +- name: _to_sparse_bsr_out + operator_name: _to_sparse_bsr overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a!
@@ -181412,12 +192117,12 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -181431,11 +192136,11 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -181459,12 +192164,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: to_sparse_bsc_out - operator_name: to_sparse_bsc +- name: _to_sparse_bsc_out + operator_name: _to_sparse_bsc overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -181485,12 +192190,12 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -181504,11 +192209,11 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -181552,12 +192257,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -181565,11 +192270,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -181598,7 +192303,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -181640,7 +192345,7 @@ name: groups type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: input_size @@ -181680,7 +192385,7 @@ name: groups type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: input_size @@ -181713,7 +192418,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -181754,7 +192459,13 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: input_size + type: at::OptionalIntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::OptionalIntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -181788,6 +192499,12 @@ is_nullable: false name: groups type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: input_size + type: at::OptionalIntArrayRef - allocate: true annotation: a! dynamic_type: at::Tensor @@ -183540,13 +194257,13 @@ name: non_blocking type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -183561,12 +194278,12 @@ name: non_blocking type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -183809,7 +194526,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () + schema_string: aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () arguments: - allocate: true annotation: a! 
@@ -183834,96 +194551,96 @@ type: at::TensorList - annotation: null dynamic_type: at::Tensor - is_nullable: false + is_nullable: true name: grad_y + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_hy + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_cy + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: z_state type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: cell_state_fwd + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: layersOutputs + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: hx + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: params + type: at::TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + schema_order_cpp_signature: void (const ::std::optional &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool, at::Tensor &, at::TensorList, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_y + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: z_state - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: cell_state_fwd - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: input - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: layersOutputs - type: const at::Tensor & - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: hx - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: params - type: at::TensorList - - annotation: null - dynamic_type: bool - is_nullable: false - name: has_biases - type: bool - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: num_layers - type: int64_t - - annotation: null - dynamic_type: double - is_nullable: false - name: dropout - type: double - - annotation: null - dynamic_type: bool - is_nullable: false - name: train - type: bool - - annotation: null - dynamic_type: bool - is_nullable: false - name: bidirectional - type: bool - - annotation: null - dynamic_type: 
bool - is_nullable: false - name: batch_first - type: bool - schema_order_cpp_signature: void (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool, at::Tensor &, at::TensorList, at::TensorList) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: grad_y - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: grad_hy - type: const c10::optional & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -184066,14 +194783,14 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &, at::Tensor &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -184095,13 +194812,13 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -184177,12 +194894,12 @@ dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -184203,18 +194920,18 @@ is_nullable: false name: has_bias type: bool - schema_order_cpp_signature: ::std::tuple (const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -184319,14 +195036,14 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -184348,13 +195065,13 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! dynamic_type: at::Tensor @@ -185401,18 +196118,18 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -185425,17 +196142,17 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -185489,12 +196206,12 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -185512,11 +196229,11 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -186338,15 +197055,15 @@ dynamic_type: int64_t is_nullable: true name: to - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186362,14 +197079,14 @@ dynamic_type: int64_t is_nullable: true name: to - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -186414,15 +197131,15 @@ dynamic_type: int64_t is_nullable: true name: to - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186438,14 +197155,14 @@ dynamic_type: int64_t is_nullable: true name: to - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -186487,13 +197204,13 @@ name: to type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186506,12 +197223,12 @@ name: to type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -186553,13 +197270,13 @@ name: to type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186572,12 +197289,12 @@ name: to type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -186614,13 +197331,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186628,12 +197345,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -186670,13 +197387,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186684,12 +197401,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -186738,13 +197455,13 @@ name: to type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186764,12 +197481,12 @@ name: to type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -186818,13 +197535,13 @@ name: to type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186844,12 +197561,12 @@ name: to type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -186898,13 +197615,13 @@ name: sigma type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186924,12 +197641,12 @@ name: sigma type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -186978,13 +197695,13 @@ name: sigma type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187004,12 +197721,12 @@ name: sigma type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -187058,13 +197775,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187084,12 +197801,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -187138,13 +197855,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187164,12 +197881,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -187212,13 +197929,13 @@ name: lambd type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187232,12 +197949,12 @@ name: lambd type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -187280,13 +197997,13 @@ name: lambd type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187300,12 +198017,12 @@ name: lambd type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -187347,13 +198064,13 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187366,12 +198083,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -187413,13 +198130,13 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187432,12 +198149,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -187810,19 +198527,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -187830,7 +198547,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: void (const at::Tensor &, at::IntArrayRef, c10::optional>, const c10::optional &, bool, at::TensorList) + schema_order_cpp_signature: void (const at::Tensor &, at::IntArrayRef, ::std::optional>, const ::std::optional &, bool, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187843,19 +198560,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor 
is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -187908,19 +198625,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -187928,7 +198645,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional>, const c10::optional &, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional>, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187941,19 +198658,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -188014,7 +198731,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -188022,7 +198739,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::TensorList, const c10::optional &, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::TensorList, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -188040,7 +198757,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -188130,95 +198847,6 @@ with_gil: false deprecated: false has_math_kernel: false -- name: argsort_out - operator_name: argsort - overload_name: stable_out - manual_kernel_registration: false - category_override: '' - schema_string: aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!) - arguments: - - allocate: true - annotation: a! 
- dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: stable - type: bool - - annotation: null - default: -1 - dynamic_type: int64_t - is_nullable: false - kwarg_only: true - name: dim - type: int64_t - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: descending - type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, int64_t, bool, at::Tensor &) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: stable - type: bool - - annotation: null - default: -1 - dynamic_type: int64_t - is_nullable: false - kwarg_only: true - name: dim - type: int64_t - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: descending - type: bool - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false - is_factory_method: false - abstract: true - device_guard: false - with_gil: false - deprecated: false - has_math_kernel: false - name: unfold_backward_out operator_name: unfold_backward overload_name: out @@ -188340,13 +198968,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -188366,12 +198994,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -188765,12 +199393,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sub_out - operator_name: _foreach_sub - overload_name: Scalar_out +- name: _foreach_add_out + operator_name: _foreach_add + overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! 
@@ -188785,11 +199413,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) + schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -188797,9 +199432,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - allocate: true annotation: a! @@ -188821,12 +199463,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_mul_out - operator_name: _foreach_mul - overload_name: Scalar_out +- name: _foreach_add_out + operator_name: _foreach_add + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -188841,11 +199483,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -188853,10 +199495,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & + name: scalars + type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -188877,12 +199519,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_div_out - operator_name: _foreach_div - overload_name: Scalar_out +- name: _foreach_add_out + operator_name: _foreach_add + overload_name: Tensor_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! 
@@ -188897,11 +199539,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) + schema_order_cpp_signature: void (at::TensorList, const at::Tensor &, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -188909,9 +199558,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - allocate: true annotation: a! @@ -188933,12 +199589,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_min_out - operator_name: _foreach_clamp_min +- name: _foreach_sub_out + operator_name: _foreach_sub overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -188989,12 +199645,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max_out - operator_name: _foreach_clamp_max - overload_name: Scalar_out +- name: _foreach_sub_out + operator_name: _foreach_sub + overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189009,11 +199665,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) + schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189021,9 +199684,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - allocate: true annotation: a! 
@@ -189045,12 +199715,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_maximum_out - operator_name: _foreach_maximum - overload_name: Scalar_out +- name: _foreach_sub_out + operator_name: _foreach_sub + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189065,11 +199735,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189077,10 +199747,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & + name: scalars + type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189101,12 +199771,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum_out - operator_name: _foreach_minimum +- name: _foreach_mul_out + operator_name: _foreach_mul overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189157,12 +199827,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_add_out - operator_name: _foreach_add +- name: _foreach_mul_out + operator_name: _foreach_mul overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189181,14 +199851,7 @@ is_nullable: false name: other type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & - is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189200,13 +199863,6 @@ is_nullable: false name: other type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & - is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & - allocate: true annotation: a! 
dynamic_type: at::TensorList @@ -189227,12 +199883,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sub_out - operator_name: _foreach_sub - overload_name: List_out +- name: _foreach_mul_out + operator_name: _foreach_mul + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189247,18 +199903,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: other - type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189266,17 +199915,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: other - type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & + name: scalars + type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189299,10 +199941,10 @@ has_math_kernel: false - name: _foreach_mul_out operator_name: _foreach_mul - overload_name: List_out + overload_name: Tensor_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_mul.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189317,11 +199959,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, const at::Tensor &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189329,10 +199971,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList + type: const at::Tensor & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189355,10 +199997,10 @@ has_math_kernel: false - name: _foreach_div_out operator_name: _foreach_div - overload_name: List_out + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! 
@@ -189373,11 +200015,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189385,10 +200027,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList + name: scalar + type: const at::Scalar & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189409,12 +200051,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_min_out - operator_name: _foreach_clamp_min +- name: _foreach_div_out + operator_name: _foreach_div overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189465,12 +200107,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max_out - operator_name: _foreach_clamp_max - overload_name: List_out +- name: _foreach_div_out + operator_name: _foreach_div + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189485,11 +200127,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::ArrayRef is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189497,10 +200139,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::ArrayRef is_nullable: false - name: other - type: at::TensorList + name: scalars + type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189521,12 +200163,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_maximum_out - operator_name: _foreach_maximum - overload_name: List_out +- name: _foreach_div_out + operator_name: _foreach_div + overload_name: Tensor_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! 
@@ -189541,11 +200183,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, const at::Tensor &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189553,10 +200195,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList + type: const at::Tensor & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189577,12 +200219,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum_out - operator_name: _foreach_minimum - overload_name: List_out +- name: _foreach_clamp_max_out + operator_name: _foreach_clamp_max + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189597,11 +200239,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189609,10 +200251,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList + name: scalar + type: const at::Scalar & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189633,12 +200275,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_add_out - operator_name: _foreach_add - overload_name: ScalarList_out +- name: _foreach_clamp_max_out + operator_name: _foreach_clamp_max + overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189653,78 +200295,22 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef - is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) - schema_order_arguments: - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: self - type: at::TensorList - - annotation: null - dynamic_type: at::ArrayRef - is_nullable: false - name: scalars - type: at::ArrayRef - - allocate: true - annotation: a! 
dynamic_type: at::TensorList is_nullable: false - name: out - output: true - type: at::TensorList - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: [] - inplace: false - is_factory_method: false - abstract: true - device_guard: false - with_gil: false - deprecated: false - has_math_kernel: false -- name: _foreach_sub_out - operator_name: _foreach_sub - overload_name: ScalarList_out - manual_kernel_registration: false - category_override: '' - schema_string: aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () - arguments: - - allocate: true - annotation: a! - dynamic_type: at::TensorList - is_nullable: false - name: out - output: true + name: other type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - - annotation: null - dynamic_type: at::ArrayRef - is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) - schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: self + name: other type: at::TensorList - - annotation: null - dynamic_type: at::ArrayRef - is_nullable: false - name: scalars - type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189745,12 +200331,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_div_out - operator_name: _foreach_div +- name: _foreach_clamp_max_out + operator_name: _foreach_clamp_max overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189801,12 +200387,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_mul_out - operator_name: _foreach_mul - overload_name: ScalarList_out +- name: _foreach_clamp_min_out + operator_name: _foreach_clamp_min + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189821,11 +200407,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189833,10 +200419,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef + name: scalar + type: const at::Scalar & - allocate: true annotation: a! 
dynamic_type: at::TensorList @@ -189859,10 +200445,10 @@ has_math_kernel: false - name: _foreach_clamp_min_out operator_name: _foreach_clamp_min - overload_name: ScalarList_out + overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189877,11 +200463,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189889,10 +200475,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189913,12 +200499,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max_out - operator_name: _foreach_clamp_max +- name: _foreach_clamp_min_out + operator_name: _foreach_clamp_min overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189971,10 +200557,10 @@ has_math_kernel: false - name: _foreach_maximum_out operator_name: _foreach_maximum - overload_name: ScalarList_out + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189989,11 +200575,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -190001,10 +200587,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef + name: scalar + type: const at::Scalar & - allocate: true annotation: a! 
dynamic_type: at::TensorList @@ -190025,12 +200611,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum_out - operator_name: _foreach_minimum - overload_name: ScalarList_out +- name: _foreach_maximum_out + operator_name: _foreach_maximum + overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190045,11 +200631,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -190057,10 +200643,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList - allocate: true annotation: a! dynamic_type: at::TensorList @@ -190081,12 +200667,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_exp_out - operator_name: _foreach_exp - overload_name: out +- name: _foreach_maximum_out + operator_name: _foreach_maximum + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190100,13 +200686,23 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -190127,12 +200723,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_zero_out - operator_name: _foreach_zero - overload_name: out +- name: _foreach_minimum_out + operator_name: _foreach_minimum + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! 
@@ -190146,13 +200742,23 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -190173,35 +200779,55 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_zero - operator_name: _foreach_zero - overload_name: '' +- name: _foreach_minimum_out + operator_name: _foreach_minimum + overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out + schema_string: aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () arguments: + - allocate: true + annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - allocate: true + annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList method_of: - Type - namespace mode: native python_module: '' - returns: - - dynamic_type: at::TensorList - field_name: self_out - name: self_out - type: ::std::vector + returns: [] inplace: false is_factory_method: false abstract: true @@ -190209,12 +200835,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sqrt_out - operator_name: _foreach_sqrt - overload_name: out +- name: _foreach_minimum_out + operator_name: _foreach_minimum + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190228,13 +200854,23 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef - allocate: true annotation: a! 
dynamic_type: at::TensorList @@ -190255,12 +200891,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_abs_out - operator_name: _foreach_abs - overload_name: out +- name: _foreach_addcdiv_out + operator_name: _foreach_addcdiv + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190274,13 +200910,45 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -190301,12 +200969,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_acos_out - operator_name: _foreach_acos - overload_name: out +- name: _foreach_addcdiv_out + operator_name: _foreach_addcdiv + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190320,13 +200988,43 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef - allocate: true annotation: a! 
dynamic_type: at::TensorList @@ -190347,12 +201045,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_asin_out - operator_name: _foreach_asin - overload_name: out +- name: _foreach_addcdiv_out + operator_name: _foreach_addcdiv + overload_name: Tensor_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190366,13 +201064,43 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -190393,12 +201121,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_atan_out - operator_name: _foreach_atan - overload_name: out +- name: _foreach_addcmul_out + operator_name: _foreach_addcmul + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190412,13 +201140,45 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & - allocate: true annotation: a! 
dynamic_type: at::TensorList @@ -190439,12 +201199,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_ceil_out - operator_name: _foreach_ceil - overload_name: out +- name: _foreach_addcmul_out + operator_name: _foreach_addcmul + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190458,13 +201218,43 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -190485,12 +201275,88 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_cos_out - operator_name: _foreach_cos +- name: _foreach_addcmul_out + operator_name: _foreach_addcmul + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & + - allocate: true + annotation: a! 
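The hunks above swap unary out-variants for the fused pointwise _foreach_addcdiv / _foreach_addcmul family, including a Tensor_out overload whose per-tensor factors arrive as a single tensor of scalars. A hedged sketch of the functional forms (the 1-D CPU layout of the scalars tensor is an assumption about the kernel's input checks, not something stated in the schema):

    // Sketch: self[i] + value * tensor1[i] * tensor2[i]   (addcmul)
    //         self[i] + value * tensor1[i] / tensor2[i]   (addcdiv)
    #include <torch/torch.h>
    #include <vector>

    int main() {
      std::vector<at::Tensor> self = {torch::zeros({2}), torch::zeros({2})};
      std::vector<at::Tensor> t1 = {torch::ones({2}), torch::ones({2})};
      std::vector<at::Tensor> t2 = {torch::full({2}, 2.0), torch::full({2}, 4.0)};

      auto a = at::_foreach_addcmul(self, t1, t2, /*value=*/0.5);
      // Tensor overload: one scaling factor per list element, as a tensor.
      auto b = at::_foreach_addcdiv(self, t1, t2, torch::tensor({0.5, 2.0}));
      return 0;
    }
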
+ dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_abs_out + operator_name: _foreach_abs overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190531,12 +201397,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_cosh_out - operator_name: _foreach_cosh +- name: _foreach_acos_out + operator_name: _foreach_acos overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190577,12 +201443,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_erf_out - operator_name: _foreach_erf +- name: _foreach_asin_out + operator_name: _foreach_asin overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190623,12 +201489,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_erfc_out - operator_name: _foreach_erfc +- name: _foreach_atan_out + operator_name: _foreach_atan overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190669,12 +201535,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_expm1_out - operator_name: _foreach_expm1 +- name: _foreach_ceil_out + operator_name: _foreach_ceil overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190715,12 +201581,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_floor_out - operator_name: _foreach_floor +- name: _foreach_cos_out + operator_name: _foreach_cos overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! 
@@ -190761,12 +201627,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_log_out - operator_name: _foreach_log +- name: _foreach_cosh_out + operator_name: _foreach_cosh overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190807,12 +201673,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_log10_out - operator_name: _foreach_log10 +- name: _foreach_erf_out + operator_name: _foreach_erf overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190853,12 +201719,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_log1p_out - operator_name: _foreach_log1p +- name: _foreach_erfc_out + operator_name: _foreach_erfc overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190899,12 +201765,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_log2_out - operator_name: _foreach_log2 +- name: _foreach_exp_out + operator_name: _foreach_exp overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190945,12 +201811,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_neg_out - operator_name: _foreach_neg +- name: _foreach_expm1_out + operator_name: _foreach_expm1 overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190991,12 +201857,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_tan_out - operator_name: _foreach_tan +- name: _foreach_floor_out + operator_name: _foreach_floor overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191037,12 +201903,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_tanh_out - operator_name: _foreach_tanh +- name: _foreach_frac_out + operator_name: _foreach_frac overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! 
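The long run of unary renames through here (log to cosh, log10 to erf, and so on) appears to be the generator emitting the same .out records in a new order, so the line diff pairs unrelated operators; the signatures themselves are unchanged. Each still maps a tensor list to a vector of results, e.g. (sketch):

    #include <torch/torch.h>
    #include <vector>

    int main() {
      std::vector<at::Tensor> xs = {torch::rand({4}), torch::rand({4})};
      auto r = at::_foreach_sqrt(xs);  // likewise _foreach_exp, _foreach_log, ...
      return 0;
    }
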
@@ -191083,12 +201949,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sin_out - operator_name: _foreach_sin - overload_name: out +- name: _foreach_lerp_out + operator_name: _foreach_lerp + overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191102,13 +201968,33 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: weights + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: weights + type: at::TensorList - allocate: true annotation: a! dynamic_type: at::TensorList @@ -191129,12 +202015,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sinh_out - operator_name: _foreach_sinh - overload_name: out +- name: _foreach_lerp_out + operator_name: _foreach_lerp + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191148,13 +202034,33 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight + type: const at::Scalar & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -191175,12 +202081,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_round_out - operator_name: _foreach_round +- name: _foreach_lgamma_out + operator_name: _foreach_lgamma overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! 
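Among the reordered records above, _foreach_lerp.List_out and _foreach_lerp.Scalar_out are worth calling out; the argument names tensors1 / weights come straight from the schema. A minimal sketch of the functional forms:

    // Sketch: batched lerp, self[i] + weight * (tensors1[i] - self[i]).
    #include <torch/torch.h>
    #include <vector>

    int main() {
      std::vector<at::Tensor> start = {torch::zeros({4}), torch::zeros({4})};
      std::vector<at::Tensor> stop = {torch::ones({4}), torch::ones({4})};
      std::vector<at::Tensor> w = {torch::full({4}, 0.1), torch::full({4}, 0.9)};

      auto a = at::_foreach_lerp(start, stop, /*weight=*/0.25);  // Scalar overload
      auto b = at::_foreach_lerp(start, stop, w);                // List overload
      return 0;
    }
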
@@ -191221,12 +202127,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_lgamma_out - operator_name: _foreach_lgamma +- name: _foreach_log_out + operator_name: _foreach_log overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191267,12 +202173,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_frac_out - operator_name: _foreach_frac +- name: _foreach_log10_out + operator_name: _foreach_log10 overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191313,12 +202219,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_reciprocal_out - operator_name: _foreach_reciprocal +- name: _foreach_log1p_out + operator_name: _foreach_log1p overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191359,12 +202265,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sigmoid_out - operator_name: _foreach_sigmoid +- name: _foreach_log2_out + operator_name: _foreach_log2 overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191405,12 +202311,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_trunc_out - operator_name: _foreach_trunc +- name: _foreach_max_out + operator_name: _foreach_max overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_max.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191451,12 +202357,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_addcdiv_out - operator_name: _foreach_addcdiv - overload_name: Scalar_out +- name: _foreach_neg_out + operator_name: _foreach_neg + overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191470,45 +202376,83 @@ is_nullable: false name: self type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor1 + name: self type: at::TensorList - - annotation: null + - allocate: true + annotation: a! 
dynamic_type: at::TensorList is_nullable: false - name: tensor2 + name: out + output: true type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_norm_out + operator_name: _foreach_norm + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, ScalarType? dtype=None, *, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! + dynamic_type: at::TensorList is_nullable: false - name: value - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) - schema_order_arguments: + name: out + output: true + type: at::TensorList - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + default: 2 + dynamic_type: const at::Scalar & is_nullable: false - name: tensor1 - type: at::TensorList + name: ord + type: const at::Scalar & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: ::std::optional + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, ::std::optional, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor2 + name: self type: at::TensorList - annotation: null - default: 1 + default: 2 dynamic_type: const at::Scalar & is_nullable: false - name: value + name: ord type: const at::Scalar & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::TensorList @@ -191529,12 +202473,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_addcmul_out - operator_name: _foreach_addcmul - overload_name: Scalar_out +- name: _foreach_pow_out + operator_name: _foreach_pow + overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191551,41 +202495,75 @@ - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor1 + name: exponent type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor2 + name: self type: at::TensorList - annotation: null - default: 1 - dynamic_type: const at::Scalar & + dynamic_type: at::TensorList is_nullable: false - name: value - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) - schema_order_arguments: - - annotation: null + name: exponent + type: at::TensorList + - allocate: true + annotation: a! 
dynamic_type: at::TensorList is_nullable: false - name: self + name: out + output: true + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_pow_out + operator_name: _foreach_pow + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: out + output: true type: at::TensorList - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor1 + name: self type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: exponent + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor2 + name: self type: at::TensorList - annotation: null - default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: value + name: exponent type: const at::Scalar & - allocate: true annotation: a! @@ -191607,12 +202585,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_addcdiv_out - operator_name: _foreach_addcdiv +- name: _foreach_pow_out + operator_name: _foreach_pow overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191626,42 +202604,22 @@ is_nullable: false name: self type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: tensor1 - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: tensor2 - type: at::TensorList - annotation: null dynamic_type: at::ArrayRef is_nullable: false - name: scalars + name: exponent type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef, at::TensorList) + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: tensor1 - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: tensor2 - type: at::TensorList - annotation: null dynamic_type: at::ArrayRef is_nullable: false - name: scalars + name: exponent type: at::ArrayRef - allocate: true annotation: a! 
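Three genuinely new families land in this stretch: _foreach_max.out, _foreach_norm.Scalar_out with a nullable dtype argument (an ::std::optional of at::ScalarType), and the _foreach_pow overloads. A sketch of the functional forms, assuming a LibTorch new enough (roughly 2.4 and later) to ship _foreach_max and the dtype parameter:

    #include <torch/torch.h>
    #include <vector>

    int main() {
      std::vector<at::Tensor> gs = {torch::randn({10}), torch::randn({10})};

      auto maxima = at::_foreach_max(gs);  // one 0-dim tensor per input
      // Per-tensor L2 norms, accumulated in float32 via the new dtype arg.
      auto norms = at::_foreach_norm(gs, /*ord=*/2, /*dtype=*/at::kFloat);
      auto sq = at::_foreach_pow(gs, /*exponent=*/2.0);  // Scalar overload
      return 0;
    }
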
@@ -191683,12 +202641,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_addcdiv_out - operator_name: _foreach_addcdiv - overload_name: Tensor_out +- name: _foreach_reciprocal_out + operator_name: _foreach_reciprocal + overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191702,43 +202660,59 @@ is_nullable: false name: self type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor1 + name: self type: at::TensorList - - annotation: null + - allocate: true + annotation: a! dynamic_type: at::TensorList is_nullable: false - name: tensor2 + name: out + output: true type: at::TensorList - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: scalars - type: const at::Tensor & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, at::TensorList) - schema_order_arguments: - - annotation: null + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_round_out + operator_name: _foreach_round + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! dynamic_type: at::TensorList is_nullable: false - name: self + name: out + output: true type: at::TensorList - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor1 + name: self type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor2 + name: self type: at::TensorList - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: scalars - type: const at::Tensor & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -191759,12 +202733,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_addcmul_out - operator_name: _foreach_addcmul - overload_name: ScalarList_out +- name: _foreach_sigmoid_out + operator_name: _foreach_sigmoid + overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191778,43 +202752,59 @@ is_nullable: false name: self type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor1 + name: self type: at::TensorList - - annotation: null + - allocate: true + annotation: a! 
dynamic_type: at::TensorList is_nullable: false - name: tensor2 + name: out + output: true type: at::TensorList - - annotation: null - dynamic_type: at::ArrayRef - is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef, at::TensorList) - schema_order_arguments: - - annotation: null + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sign_out + operator_name: _foreach_sign + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sign.out(Tensor[] self, *, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! dynamic_type: at::TensorList is_nullable: false - name: self + name: out + output: true type: at::TensorList - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor1 + name: self type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor2 + name: self type: at::TensorList - - annotation: null - dynamic_type: at::ArrayRef - is_nullable: false - name: scalars - type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -191835,12 +202825,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_addcmul_out - operator_name: _foreach_addcmul - overload_name: Tensor_out +- name: _foreach_sin_out + operator_name: _foreach_sin + overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191854,43 +202844,59 @@ is_nullable: false name: self type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor1 + name: self type: at::TensorList - - annotation: null + - allocate: true + annotation: a! dynamic_type: at::TensorList is_nullable: false - name: tensor2 + name: out + output: true type: at::TensorList - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: scalars - type: const at::Tensor & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, at::TensorList) - schema_order_arguments: - - annotation: null + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sinh_out + operator_name: _foreach_sinh + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! 
dynamic_type: at::TensorList is_nullable: false - name: self + name: out + output: true type: at::TensorList - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor1 + name: self type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensor2 + name: self type: at::TensorList - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: scalars - type: const at::Tensor & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -191911,12 +202917,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_norm_out - operator_name: _foreach_norm - overload_name: Scalar_out +- name: _foreach_sqrt_out + operator_name: _foreach_sqrt + overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191930,25 +202936,13 @@ is_nullable: false name: self type: at::TensorList - - annotation: null - default: 2 - dynamic_type: const at::Scalar & - is_nullable: false - name: ord - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) + schema_order_cpp_signature: void (at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - - annotation: null - default: 2 - dynamic_type: const at::Scalar & - is_nullable: false - name: ord - type: const at::Scalar & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -191969,12 +202963,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_lerp_out - operator_name: _foreach_lerp - overload_name: List_out +- name: _foreach_tan_out + operator_name: _foreach_tan + overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -191988,32 +202982,58 @@ is_nullable: false name: self type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensors1 + name: self type: at::TensorList - - annotation: null + - allocate: true + annotation: a! dynamic_type: at::TensorList is_nullable: false - name: weights + name: out + output: true type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList) - schema_order_arguments: - - annotation: null + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_tanh_out + operator_name: _foreach_tanh + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! 
dynamic_type: at::TensorList is_nullable: false - name: self + name: out + output: true type: at::TensorList - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensors1 + name: self type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: weights + name: self type: at::TensorList - allocate: true annotation: a! @@ -192035,12 +203055,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_lerp_out - operator_name: _foreach_lerp - overload_name: Scalar_out +- name: _foreach_trunc_out + operator_name: _foreach_trunc + overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -192054,33 +203074,59 @@ is_nullable: false name: self type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensors1 + name: self type: at::TensorList - - annotation: null - dynamic_type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::TensorList is_nullable: false - name: weight - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) - schema_order_arguments: - - annotation: null + name: out + output: true + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_zero_out + operator_name: _foreach_zero + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! dynamic_type: at::TensorList is_nullable: false - name: self + name: out + output: true type: at::TensorList - annotation: null dynamic_type: at::TensorList is_nullable: false - name: tensors1 + name: self type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::TensorList is_nullable: false - name: weight - type: const at::Scalar & + name: self + type: at::TensorList - allocate: true annotation: a! dynamic_type: at::TensorList @@ -192101,86 +203147,103 @@ with_gil: false deprecated: false has_math_kernel: false -- name: bucketize_out - operator_name: bucketize - overload_name: Scalar_out +- name: _foreach_zero + operator_name: _foreach_zero + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) 
+ schema_string: aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + field_name: self_out + name: self_out + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_copy_out + operator_name: _foreach_copy + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_copy.out(Tensor[] self, Tensor[] src, bool non_blocking=False, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false name: out output: true - type: at::Tensor & + type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Scalar & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: boundaries - type: const at::Tensor & + type: at::TensorList - annotation: null - default: false - dynamic_type: bool + dynamic_type: at::TensorList is_nullable: false - kwarg_only: true - name: out_int32 - type: bool + name: src + type: at::TensorList - annotation: null default: false dynamic_type: bool is_nullable: false - kwarg_only: true - name: right + name: non_blocking type: bool - schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, bool, bool, at::Tensor &) + schema_order_cpp_signature: void (at::TensorList, at::TensorList, bool, at::TensorList) schema_order_arguments: - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Scalar & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: boundaries - type: const at::Tensor & + type: at::TensorList - annotation: null - default: false - dynamic_type: bool + dynamic_type: at::TensorList is_nullable: false - kwarg_only: true - name: out_int32 - type: bool + name: src + type: at::TensorList - annotation: null default: false dynamic_type: bool is_nullable: false - kwarg_only: true - name: right + name: non_blocking type: bool - allocate: true annotation: a! - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false name: out output: true - type: at::Tensor & + type: at::TensorList method_of: - Type - namespace mode: native python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & + returns: [] inplace: false is_factory_method: false abstract: true @@ -192188,12 +203251,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: searchsorted_out - operator_name: searchsorted +- name: bucketize_out + operator_name: bucketize overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) 
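Two additions above deserve a note: _foreach_zero gains an autogenerated functional record returning self_out (an ::std::vector of at::Tensor), and _foreach_copy.out is new, a batched copy_ with a non_blocking flag. C++ callers typically reach for the in-place spellings; a minimal sketch:

    #include <torch/torch.h>
    #include <vector>

    int main() {
      std::vector<at::Tensor> params = {torch::ones({3}), torch::ones({3})};
      std::vector<at::Tensor> staged = {torch::randn({3}), torch::randn({3})};

      at::_foreach_zero_(params);                                  // batched zero_()
      at::_foreach_copy_(params, staged, /*non_blocking=*/false);  // batched copy_()
      return 0;
    }
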
+ schema_string: aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -192202,16 +203265,16 @@ name: out output: true type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: sorted_sequence - type: const at::Tensor & - annotation: null dynamic_type: const at::Scalar & is_nullable: false name: self type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & - annotation: null default: false dynamic_type: bool @@ -192226,32 +203289,18 @@ kwarg_only: true name: right type: bool - - annotation: null - default: c10::nullopt - dynamic_type: c10::string_view - is_nullable: true - kwarg_only: true - name: side - type: c10::optional - - annotation: null - default: '{}' - dynamic_type: at::Tensor - is_nullable: true - kwarg_only: true - name: sorter - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, bool, bool, c10::optional, const c10::optional &, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, bool, bool, at::Tensor &) schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: sorted_sequence - type: const at::Tensor & - annotation: null dynamic_type: const at::Scalar & is_nullable: false name: self type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & - annotation: null default: false dynamic_type: bool @@ -192266,20 +203315,6 @@ kwarg_only: true name: right type: bool - - annotation: null - default: c10::nullopt - dynamic_type: c10::string_view - is_nullable: true - kwarg_only: true - name: side - type: c10::optional - - annotation: null - default: '{}' - dynamic_type: at::Tensor - is_nullable: true - kwarg_only: true - name: sorter - type: const c10::optional & - allocate: true annotation: a! dynamic_type: at::Tensor @@ -192953,7 +203988,7 @@ overload_name: output_mask_out manual_kernel_registration: false category_override: '' - schema_string: aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + schema_string: aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) arguments: - allocate: true annotation: a! @@ -193102,7 +204137,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -193131,7 +204166,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -193150,7 +204185,7 @@ name: dilation size: 3 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -193172,7 +204207,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -193219,7 +204254,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -193249,7 +204284,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -193271,7 +204306,7 @@ name: dilation size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -193294,7 +204329,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -193344,7 +204379,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -193374,7 +204409,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -193396,7 +204431,7 @@ name: dilation size: 3 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -193419,7 +204454,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -193705,8 +204740,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: addends - type: c10::optional> - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional>, at::Tensor &) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional>, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -193717,7 +204752,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: addends - type: c10::optional> + type: ::std::optional> - allocate: true annotation: a! dynamic_type: at::Tensor @@ -193918,21 +204953,21 @@ is_nullable: true kwarg_only: true name: lengths - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: indices - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: offsets - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ -193948,13 +204983,13 @@ name: unsafe type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: initial - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, bool, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, bool, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -193972,21 +205007,21 @@ is_nullable: true kwarg_only: true name: lengths - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: indices - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: offsets - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ -194002,12 +205037,12 @@ name: unsafe type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: initial - type: const c10::optional & + type: 
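From roughly this point on, the regenerated file swaps c10::optional / c10::nullopt for ::std::optional / ::std::nullopt throughout (nullable tensor arguments such as bias are ::std::optional of at::Tensor, nullable scalars of at::Scalar, and so on), and the convolution schemas move int[] sizes to SymInt[]. Existing C++ callers should be largely unaffected: the non-symint wrappers still accept at::IntArrayRef, and std::nullopt can be passed directly for the optional bias. A hedged sketch against conv_depthwise3d from the hunk above, assuming a LibTorch where c10::optional is (or aliases) std::optional:

    #include <torch/torch.h>
    #include <optional>

    int main() {
      auto x = torch::randn({1, 4, 8, 8, 8});
      auto w = torch::randn({4, 1, 3, 3, 3});  // depthwise: one filter per channel
      std::optional<at::Tensor> bias = std::nullopt;

      auto y = at::conv_depthwise3d(x, w, /*kernel_size=*/{3, 3, 3}, bias,
                                    /*stride=*/{1, 1, 1}, /*padding=*/{1, 1, 1},
                                    /*dilation=*/{1, 1, 1});
      return 0;
    }
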
const ::std::optional & - allocate: true annotation: a! dynamic_type: at::Tensor @@ -194071,14 +205106,14 @@ is_nullable: true kwarg_only: true name: lengths - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: offsets - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ -194087,13 +205122,13 @@ name: axis type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: initial - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::string_view, const c10::optional &, const c10::optional &, int64_t, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::string_view, const ::std::optional &, const ::std::optional &, int64_t, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -194121,14 +205156,14 @@ is_nullable: true kwarg_only: true name: lengths - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: offsets - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ -194137,12 +205172,12 @@ name: axis type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: initial - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -194186,30 +205221,30 @@ name: list type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: pin_memory - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::TensorList, c10::optional, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::TensorList, ::std::optional, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -194217,29 +205252,29 @@ name: list type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: pin_memory - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -194617,12 +205652,12 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -194640,11 +205675,11 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -195157,24 +206192,24 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: start - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: end - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t is_nullable: false name: step type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, c10::optional, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, ::std::optional, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -195188,17 +206223,17 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: start - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: end - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -196235,7 +207270,7 @@ name: padding type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: output_size @@ -196253,7 +207288,7 @@ name: padding type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: output_size @@ -196390,14 +207425,14 @@ dynamic_type: at::Tensor is_nullable: true name: mask - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, double, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, double, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -196494,13 +207529,13 @@ dynamic_type: at::Tensor is_nullable: true name: mask - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -196595,7 +207630,7 @@ dynamic_type: at::Tensor is_nullable: true name: mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: true dynamic_type: bool @@ -196609,12 +207644,12 @@ name: average_attn_weights type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, bool, bool, c10::optional, at::Tensor &, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, bool, bool, ::std::optional, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -196666,7 +207701,7 @@ dynamic_type: at::Tensor is_nullable: true name: mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: true dynamic_type: bool @@ -196680,11 +207715,11 @@ name: average_attn_weights type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -196863,76 +207898,1637 @@ dynamic_type: at::Tensor is_nullable: true name: mask - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: query + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: key + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: embed_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_head + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: qkv_weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: qkv_bias + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: proj_weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: proj_bias + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: mask + type: const ::std::optional & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foobar_out + operator_name: _foobar + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: arg1 + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: arg2 + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: arg3 + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, bool, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: arg1 + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: arg2 + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: arg3 + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adam_out + operator_name: _fused_adam + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! 
+ dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + - allocate: true + annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adam + operator_name: _fused_adam + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + 
is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + field_name: self_out + name: self_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: grads_out + name: grads_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avgs_out + name: exp_avgs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avg_sqs_out + name: exp_avg_sqs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: max_exp_avg_sqs_out + name: max_exp_avg_sqs_out + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adam_out + operator_name: _fused_adam + overload_name: tensor_lr_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! 
+ dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + - allocate: true + annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adam + operator_name: _fused_adam + overload_name: tensor_lr + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: 
at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + field_name: self_out + name: self_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: grads_out + name: grads_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avgs_out + name: exp_avgs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avg_sqs_out + name: exp_avg_sqs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: max_exp_avg_sqs_out + name: max_exp_avg_sqs_out + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adamw_out + operator_name: _fused_adamw + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! 
+ dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + - allocate: true + annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adamw + operator_name: _fused_adamw + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: 
max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + field_name: self_out + name: self_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: grads_out + name: grads_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avgs_out + name: exp_avgs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avg_sqs_out + name: exp_avg_sqs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: max_exp_avg_sqs_out + name: max_exp_avg_sqs_out + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adamw_out + operator_name: _fused_adamw + overload_name: tensor_lr_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adamw.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! 
+ dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! 
+ dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList is_nullable: false - name: query - type: const at::Tensor & + name: state_steps + type: at::TensorList - annotation: null dynamic_type: at::Tensor is_nullable: false - name: key + kwarg_only: true + name: lr type: const at::Tensor & - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: value - type: const at::Tensor & + kwarg_only: true + name: beta1 + type: double - annotation: null - dynamic_type: int64_t + dynamic_type: double is_nullable: false - name: embed_dim - type: int64_t + kwarg_only: true + name: beta2 + type: double - annotation: null - dynamic_type: int64_t + dynamic_type: double is_nullable: false - name: num_head - type: int64_t + kwarg_only: true + name: weight_decay + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: qkv_weight - type: const at::Tensor & + kwarg_only: true + name: eps + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: qkv_bias - type: const at::Tensor & + kwarg_only: true + name: amsgrad + type: bool - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: proj_weight - type: const at::Tensor & + kwarg_only: true + name: maximize + type: bool - annotation: null + default: '{}' dynamic_type: at::Tensor - is_nullable: false - name: proj_bias - type: const at::Tensor & + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: mask - type: const c10::optional & + kwarg_only: true + name: found_inf + type: const ::std::optional & - allocate: true annotation: a! - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false name: out output: true - type: at::Tensor & + type: at::TensorList method_of: - Type - namespace mode: native python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & + returns: [] inplace: false is_factory_method: false abstract: true @@ -196940,288 +209536,213 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _transformer_decoder_only_layer_fwd_out - operator_name: _transformer_decoder_only_layer_fwd - overload_name: out +- name: _fused_adamw + operator_name: _fused_adamw + overload_name: tensor_lr manual_kernel_registration: false category_override: '' - schema_string: aten::_transformer_decoder_only_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + schema_string: aten::_fused_adamw.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out0 - output: true - type: at::Tensor & - - allocate: true - annotation: b! - dynamic_type: at::Tensor - is_nullable: false - name: out1 - output: true - type: at::Tensor & - - allocate: true - annotation: c! - dynamic_type: at::Tensor - is_nullable: false - name: out2 - output: true - type: at::Tensor & - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: src - type: const at::Tensor & + name: self + type: at::TensorList - annotation: null - dynamic_type: int64_t + dynamic_type: at::TensorList is_nullable: false - name: embed_dim - type: int64_t + name: grads + type: at::TensorList - annotation: null - dynamic_type: int64_t + dynamic_type: at::TensorList is_nullable: false - name: num_heads - type: int64_t + name: exp_avgs + type: at::TensorList - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: qkv_weight - type: const at::Tensor & + name: exp_avg_sqs + type: at::TensorList - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: qkv_bias - type: const at::Tensor & + name: max_exp_avg_sqs + type: at::TensorList - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: proj_weight - type: const at::Tensor & + name: state_steps + type: at::TensorList - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_bias + kwarg_only: true + name: lr type: const at::Tensor & - - annotation: null - dynamic_type: bool - is_nullable: false - name: use_gelu - type: bool - - annotation: null - dynamic_type: bool - is_nullable: false - name: norm_first - type: bool - annotation: null dynamic_type: double is_nullable: false - name: eps + kwarg_only: true + name: beta1 type: double - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: norm_weight_1 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: norm_bias_1 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: norm_weight_2 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: norm_bias_2 - type: const at::Tensor & + kwarg_only: true + name: beta2 + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: ffn_weight_1 - type: const at::Tensor & + kwarg_only: true + name: weight_decay + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: ffn_bias_1 - type: const at::Tensor & + kwarg_only: true + name: eps + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: ffn_weight_2 - type: const at::Tensor & + kwarg_only: true + name: amsgrad + type: bool - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: ffn_bias_2 - type: const at::Tensor & - - annotation: null - default: '{}' - dynamic_type: at::Tensor - is_nullable: true - name: mask - type: const c10::optional & + kwarg_only: true + name: maximize + type: bool - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_key - type: const 
c10::optional & + kwarg_only: true + name: grad_scale + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_value - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, double, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, at::Tensor &, at::Tensor &, at::Tensor &) + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: src - type: const at::Tensor & + name: self + type: at::TensorList - annotation: null - dynamic_type: int64_t + dynamic_type: at::TensorList is_nullable: false - name: embed_dim - type: int64_t + name: grads + type: at::TensorList - annotation: null - dynamic_type: int64_t + dynamic_type: at::TensorList is_nullable: false - name: num_heads - type: int64_t + name: exp_avgs + type: at::TensorList - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: qkv_weight - type: const at::Tensor & + name: exp_avg_sqs + type: at::TensorList - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: qkv_bias - type: const at::Tensor & + name: max_exp_avg_sqs + type: at::TensorList - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: proj_weight - type: const at::Tensor & + name: state_steps + type: at::TensorList - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_bias + kwarg_only: true + name: lr type: const at::Tensor & - - annotation: null - dynamic_type: bool - is_nullable: false - name: use_gelu - type: bool - - annotation: null - dynamic_type: bool - is_nullable: false - name: norm_first - type: bool - annotation: null dynamic_type: double is_nullable: false - name: eps + kwarg_only: true + name: beta1 type: double - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: norm_weight_1 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: norm_bias_1 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: norm_weight_2 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: norm_bias_2 - type: const at::Tensor & + kwarg_only: true + name: beta2 + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: ffn_weight_1 - type: const at::Tensor & + kwarg_only: true + name: weight_decay + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: ffn_bias_1 - type: const at::Tensor & + kwarg_only: true + name: eps + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: 
bool is_nullable: false - name: ffn_weight_2 - type: const at::Tensor & + kwarg_only: true + name: amsgrad + type: bool - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: ffn_bias_2 - type: const at::Tensor & - - annotation: null - default: '{}' - dynamic_type: at::Tensor - is_nullable: true - name: mask - type: const c10::optional & + kwarg_only: true + name: maximize + type: bool - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_key - type: const c10::optional & + kwarg_only: true + name: grad_scale + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_value - type: const c10::optional & - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out0 - output: true - type: at::Tensor & - - allocate: true - annotation: b! - dynamic_type: at::Tensor - is_nullable: false - name: out1 - output: true - type: at::Tensor & - - allocate: true - annotation: c! - dynamic_type: at::Tensor - is_nullable: false - name: out2 - output: true - type: at::Tensor & + kwarg_only: true + name: found_inf + type: const ::std::optional & method_of: - Type - namespace mode: native python_module: '' returns: - - dynamic_type: at::Tensor - name: out0 - type: at::Tensor & - - dynamic_type: at::Tensor - name: out1 - type: at::Tensor & - - dynamic_type: at::Tensor - name: out2 - type: at::Tensor & + - dynamic_type: at::TensorList + field_name: self_out + name: self_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: grads_out + name: grads_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avgs_out + name: exp_avgs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avg_sqs_out + name: exp_avg_sqs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: max_exp_avg_sqs_out + name: max_exp_avg_sqs_out + type: ::std::vector inplace: false is_factory_method: false abstract: true @@ -197229,239 +209750,177 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _native_decoder_only_multi_head_attention_out - operator_name: _native_decoder_only_multi_head_attention +- name: _fused_sgd_out + operator_name: _fused_sgd overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_native_decoder_only_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) + schema_string: aten::_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out0 - output: true - type: at::Tensor & - - allocate: true - annotation: b! - dynamic_type: at::Tensor - is_nullable: false - name: out1 - output: true - type: at::Tensor & - - allocate: true - annotation: c! 
- dynamic_type: at::Tensor - is_nullable: false - name: out2 - output: true - type: at::Tensor & - - allocate: true - annotation: d! - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: out3 + name: out output: true - type: at::Tensor & + type: at::TensorList - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: query - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList is_nullable: false - name: key - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList is_nullable: false - name: value - type: const at::Tensor & + name: momentum_buffer_list + type: at::TensorList - annotation: null - dynamic_type: int64_t + dynamic_type: double is_nullable: false - name: embed_dim - type: int64_t + kwarg_only: true + name: weight_decay + type: double - annotation: null - dynamic_type: int64_t + dynamic_type: double is_nullable: false - name: num_head - type: int64_t + kwarg_only: true + name: momentum + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: qkv_weight - type: const at::Tensor & + kwarg_only: true + name: lr + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: qkv_bias - type: const at::Tensor & + kwarg_only: true + name: dampening + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: proj_weight - type: const at::Tensor & + kwarg_only: true + name: nesterov + type: bool - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: proj_bias - type: const at::Tensor & + kwarg_only: true + name: maximize + type: bool - annotation: null - default: '{}' - dynamic_type: at::Tensor - is_nullable: true - name: mask - type: const c10::optional & + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: is_first_step + type: bool - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_key - type: const c10::optional & + kwarg_only: true + name: grad_scale + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_value - type: const c10::optional & - - annotation: null - default: true - dynamic_type: bool - is_nullable: false - name: need_weights - type: bool - - annotation: null - default: true - dynamic_type: bool - is_nullable: false - name: average_attn_weights - type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, bool, bool, const ::std::optional &, const ::std::optional &, at::TensorList) schema_order_arguments: - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: query - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor + name: self + type: 
at::TensorList + - annotation: b! + dynamic_type: at::TensorList is_nullable: false - name: key - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList is_nullable: false - name: value - type: const at::Tensor & + name: momentum_buffer_list + type: at::TensorList - annotation: null - dynamic_type: int64_t + dynamic_type: double is_nullable: false - name: embed_dim - type: int64_t + kwarg_only: true + name: weight_decay + type: double - annotation: null - dynamic_type: int64_t + dynamic_type: double is_nullable: false - name: num_head - type: int64_t + kwarg_only: true + name: momentum + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: qkv_weight - type: const at::Tensor & + kwarg_only: true + name: lr + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: qkv_bias - type: const at::Tensor & + kwarg_only: true + name: dampening + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: proj_weight - type: const at::Tensor & + kwarg_only: true + name: nesterov + type: bool - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: proj_bias - type: const at::Tensor & + kwarg_only: true + name: maximize + type: bool - annotation: null - default: '{}' - dynamic_type: at::Tensor - is_nullable: true - name: mask - type: const c10::optional & + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: is_first_step + type: bool - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_key - type: const c10::optional & + kwarg_only: true + name: grad_scale + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_value - type: const c10::optional & - - annotation: null - default: true - dynamic_type: bool - is_nullable: false - name: need_weights - type: bool - - annotation: null - default: true - dynamic_type: bool - is_nullable: false - name: average_attn_weights - type: bool + kwarg_only: true + name: found_inf + type: const ::std::optional & - allocate: true annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out0 - output: true - type: at::Tensor & - - allocate: true - annotation: b! - dynamic_type: at::Tensor - is_nullable: false - name: out1 - output: true - type: at::Tensor & - - allocate: true - annotation: c! - dynamic_type: at::Tensor - is_nullable: false - name: out2 - output: true - type: at::Tensor & - - allocate: true - annotation: d! 
- dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: out3 + name: out output: true - type: at::Tensor & + type: at::TensorList method_of: - Type - namespace mode: native python_module: '' - returns: - - dynamic_type: at::Tensor - name: out0 - type: at::Tensor & - - dynamic_type: at::Tensor - name: out1 - type: at::Tensor & - - dynamic_type: at::Tensor - name: out2 - type: at::Tensor & - - dynamic_type: at::Tensor - name: out3 - type: at::Tensor & + returns: [] inplace: false is_factory_method: false abstract: true @@ -197469,86 +209928,175 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foobar_out - operator_name: _foobar - overload_name: out +- name: _fused_sgd + operator_name: _fused_sgd + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_fused_sgd(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out) arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Tensor & + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: momentum_buffer_list + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: dampening + type: double - annotation: null - default: true dynamic_type: bool is_nullable: false - name: arg1 + kwarg_only: true + name: nesterov type: bool - annotation: null - default: true dynamic_type: bool is_nullable: false - name: arg2 + kwarg_only: true + name: maximize type: bool - annotation: null - default: true dynamic_type: bool is_nullable: false kwarg_only: true - name: arg3 + name: is_first_step type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, bool, bool, at::Tensor &) - schema_order_arguments: - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple<::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, bool, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Tensor & + type: at::TensorList + - annotation: null + dynamic_type: 
at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: momentum_buffer_list + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: dampening + type: double - annotation: null - default: true dynamic_type: bool is_nullable: false - name: arg1 + kwarg_only: true + name: nesterov type: bool - annotation: null - default: true dynamic_type: bool is_nullable: false - name: arg2 + kwarg_only: true + name: maximize type: bool - annotation: null - default: true dynamic_type: bool is_nullable: false kwarg_only: true - name: arg3 + name: is_first_step type: bool - - allocate: true - annotation: a! + - annotation: null + default: '{}' dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & method_of: - Type - namespace mode: native python_module: '' returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & + - dynamic_type: at::TensorList + field_name: self_out + name: self_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: grads_out + name: grads_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: momentum_buffer_list_out + name: momentum_buffer_list_out + type: ::std::vector inplace: false is_factory_method: false abstract: true @@ -197556,12 +210104,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _fused_adam_out - operator_name: _fused_adam - overload_name: out +- name: _fused_sgd_out + operator_name: _fused_sgd + overload_name: tensor_lr_out manual_kernel_registration: false category_override: '' - schema_string: aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + schema_string: aten::_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -197583,64 +210131,49 @@ - annotation: c! dynamic_type: at::TensorList is_nullable: false - name: exp_avgs - type: at::TensorList - - annotation: d! - dynamic_type: at::TensorList - is_nullable: false - name: exp_avg_sqs - type: at::TensorList - - annotation: e! 
- dynamic_type: at::TensorList - is_nullable: false - name: max_exp_avg_sqs - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: state_steps + name: momentum_buffer_list type: at::TensorList - annotation: null dynamic_type: double is_nullable: false kwarg_only: true - name: lr + name: weight_decay type: double - annotation: null dynamic_type: double is_nullable: false kwarg_only: true - name: beta1 + name: momentum type: double - annotation: null - dynamic_type: double + dynamic_type: at::Tensor is_nullable: false kwarg_only: true - name: beta2 - type: double + name: lr + type: const at::Tensor & - annotation: null dynamic_type: double is_nullable: false kwarg_only: true - name: weight_decay + name: dampening type: double - annotation: null - dynamic_type: double + dynamic_type: bool is_nullable: false kwarg_only: true - name: eps - type: double + name: nesterov + type: bool - annotation: null dynamic_type: bool is_nullable: false kwarg_only: true - name: amsgrad + name: maximize type: bool - annotation: null dynamic_type: bool is_nullable: false kwarg_only: true - name: maximize + name: is_first_step type: bool - annotation: null default: '{}' @@ -197648,15 +210181,15 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional &, const c10::optional &, at::TensorList) + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, double, double, const at::Tensor &, double, bool, bool, bool, const ::std::optional &, const ::std::optional &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -197671,64 +210204,49 @@ - annotation: c! dynamic_type: at::TensorList is_nullable: false - name: exp_avgs - type: at::TensorList - - annotation: d! - dynamic_type: at::TensorList - is_nullable: false - name: exp_avg_sqs - type: at::TensorList - - annotation: e! 
- dynamic_type: at::TensorList - is_nullable: false - name: max_exp_avg_sqs - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: state_steps + name: momentum_buffer_list type: at::TensorList - annotation: null dynamic_type: double is_nullable: false kwarg_only: true - name: lr + name: weight_decay type: double - annotation: null dynamic_type: double is_nullable: false kwarg_only: true - name: beta1 + name: momentum type: double - annotation: null - dynamic_type: double + dynamic_type: at::Tensor is_nullable: false kwarg_only: true - name: beta2 - type: double + name: lr + type: const at::Tensor & - annotation: null dynamic_type: double is_nullable: false kwarg_only: true - name: weight_decay + name: dampening type: double - annotation: null - dynamic_type: double + dynamic_type: bool is_nullable: false kwarg_only: true - name: eps - type: double + name: nesterov + type: bool - annotation: null dynamic_type: bool is_nullable: false kwarg_only: true - name: amsgrad + name: maximize type: bool - annotation: null dynamic_type: bool is_nullable: false kwarg_only: true - name: maximize + name: is_first_step type: bool - annotation: null default: '{}' @@ -197736,14 +210254,14 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -197764,12 +210282,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _fused_adam - operator_name: _fused_adam - overload_name: '' +- name: _fused_sgd + operator_name: _fused_sgd + overload_name: tensor_lr manual_kernel_registration: false category_override: '' - schema_string: aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) + schema_string: aten::_fused_sgd.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out) arguments: - annotation: null dynamic_type: at::TensorList @@ -197784,64 +210302,49 @@ - annotation: null dynamic_type: at::TensorList is_nullable: false - name: exp_avgs - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: exp_avg_sqs - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: max_exp_avg_sqs - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: state_steps + name: momentum_buffer_list type: at::TensorList - annotation: null dynamic_type: double is_nullable: false kwarg_only: true - name: lr + name: weight_decay type: double - annotation: null dynamic_type: double is_nullable: false kwarg_only: true - name: beta1 + name: momentum type: double - annotation: null - dynamic_type: double + dynamic_type: at::Tensor is_nullable: false kwarg_only: true - name: beta2 - type: double + name: lr + type: const at::Tensor & - annotation: null dynamic_type: double is_nullable: false kwarg_only: true - name: weight_decay + name: dampening type: double - annotation: null - dynamic_type: double + dynamic_type: bool is_nullable: false kwarg_only: true - name: eps - type: double + name: nesterov + type: bool - annotation: null dynamic_type: bool is_nullable: false kwarg_only: true - name: amsgrad + name: maximize type: bool - annotation: null dynamic_type: bool is_nullable: false kwarg_only: true - name: maximize + name: is_first_step type: bool - annotation: null default: '{}' @@ -197849,15 +210352,15 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple<::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, double, double, const at::Tensor &, double, bool, bool, bool, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -197872,64 +210375,49 @@ - annotation: null dynamic_type: at::TensorList is_nullable: false - name: exp_avgs - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: exp_avg_sqs - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: max_exp_avg_sqs - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: state_steps + name: momentum_buffer_list type: at::TensorList - annotation: null dynamic_type: double is_nullable: false kwarg_only: true - name: lr + name: weight_decay type: double - annotation: null dynamic_type: double is_nullable: false kwarg_only: true - name: beta1 + name: momentum type: double - annotation: null - dynamic_type: double + dynamic_type: at::Tensor is_nullable: false kwarg_only: true - name: beta2 - type: double + name: lr + type: const at::Tensor & - annotation: null dynamic_type: double 
is_nullable: false kwarg_only: true - name: weight_decay + name: dampening type: double - annotation: null - dynamic_type: double + dynamic_type: bool is_nullable: false kwarg_only: true - name: eps - type: double + name: nesterov + type: bool - annotation: null dynamic_type: bool is_nullable: false kwarg_only: true - name: amsgrad + name: maximize type: bool - annotation: null dynamic_type: bool is_nullable: false kwarg_only: true - name: maximize + name: is_first_step type: bool - annotation: null default: '{}' @@ -197937,14 +210425,14 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -197960,16 +210448,8 @@ name: grads_out type: ::std::vector - dynamic_type: at::TensorList - field_name: exp_avgs_out - name: exp_avgs_out - type: ::std::vector - - dynamic_type: at::TensorList - field_name: exp_avg_sqs_out - name: exp_avg_sqs_out - type: ::std::vector - - dynamic_type: at::TensorList - field_name: max_exp_avg_sqs_out - name: max_exp_avg_sqs_out + field_name: momentum_buffer_list_out + name: momentum_buffer_list_out type: ::std::vector inplace: false is_factory_method: false @@ -197978,12 +210458,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _fused_adamw_out - operator_name: _fused_adamw +- name: _fused_adagrad_out + operator_name: _fused_adagrad overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + schema_string: aten::_fused_adagrad.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] state_sums, Tensor(d!)[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -198005,19 +210485,9 @@ - annotation: c! dynamic_type: at::TensorList is_nullable: false - name: exp_avgs + name: state_sums type: at::TensorList - annotation: d! - dynamic_type: at::TensorList - is_nullable: false - name: exp_avg_sqs - type: at::TensorList - - annotation: e! 
- dynamic_type: at::TensorList - is_nullable: false - name: max_exp_avg_sqs - type: at::TensorList - - annotation: null dynamic_type: at::TensorList is_nullable: false name: state_steps @@ -198032,13 +210502,7 @@ dynamic_type: double is_nullable: false kwarg_only: true - name: beta1 - type: double - - annotation: null - dynamic_type: double - is_nullable: false - kwarg_only: true - name: beta2 + name: lr_decay type: double - annotation: null dynamic_type: double @@ -198052,12 +210516,6 @@ kwarg_only: true name: eps type: double - - annotation: null - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: amsgrad - type: bool - annotation: null dynamic_type: bool is_nullable: false @@ -198070,15 +210528,15 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional &, const c10::optional &, at::TensorList) + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, const ::std::optional &, const ::std::optional &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -198093,19 +210551,9 @@ - annotation: c! dynamic_type: at::TensorList is_nullable: false - name: exp_avgs + name: state_sums type: at::TensorList - annotation: d! - dynamic_type: at::TensorList - is_nullable: false - name: exp_avg_sqs - type: at::TensorList - - annotation: e! - dynamic_type: at::TensorList - is_nullable: false - name: max_exp_avg_sqs - type: at::TensorList - - annotation: null dynamic_type: at::TensorList is_nullable: false name: state_steps @@ -198120,13 +210568,7 @@ dynamic_type: double is_nullable: false kwarg_only: true - name: beta1 - type: double - - annotation: null - dynamic_type: double - is_nullable: false - kwarg_only: true - name: beta2 + name: lr_decay type: double - annotation: null dynamic_type: double @@ -198140,12 +210582,6 @@ kwarg_only: true name: eps type: double - - annotation: null - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: amsgrad - type: bool - annotation: null dynamic_type: bool is_nullable: false @@ -198158,14 +210594,14 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -198186,12 +210622,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _fused_adamw - operator_name: _fused_adamw +- name: _fused_adagrad + operator_name: _fused_adagrad overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) + schema_string: aten::_fused_adagrad(Tensor[] self, Tensor[] grads, Tensor[] state_sums, Tensor[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] state_sums_out, Tensor[] state_steps_out) arguments: - annotation: null dynamic_type: at::TensorList @@ -198206,17 +210642,7 @@ - annotation: null dynamic_type: at::TensorList is_nullable: false - name: exp_avgs - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: exp_avg_sqs - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: max_exp_avg_sqs + name: state_sums type: at::TensorList - annotation: null dynamic_type: at::TensorList @@ -198233,13 +210659,7 @@ dynamic_type: double is_nullable: false kwarg_only: true - name: beta1 - type: double - - annotation: null - dynamic_type: double - is_nullable: false - kwarg_only: true - name: beta2 + name: lr_decay type: double - annotation: null dynamic_type: double @@ -198253,12 +210673,6 @@ kwarg_only: true name: eps type: double - - annotation: null - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: amsgrad - type: bool - annotation: null dynamic_type: bool is_nullable: false @@ -198271,15 +210685,15 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -198294,17 +210708,7 @@ - annotation: null dynamic_type: at::TensorList is_nullable: false - name: exp_avgs - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: exp_avg_sqs - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: max_exp_avg_sqs + name: state_sums type: at::TensorList - annotation: null dynamic_type: at::TensorList @@ -198321,13 +210725,7 @@ dynamic_type: double is_nullable: false kwarg_only: true - name: beta1 - type: double - - annotation: null - dynamic_type: double - is_nullable: false - kwarg_only: true - name: beta2 + name: lr_decay type: double - annotation: null dynamic_type: double @@ -198341,12 +210739,6 @@ kwarg_only: true name: eps type: double - - annotation: null - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: amsgrad - type: bool - annotation: null dynamic_type: bool is_nullable: false @@ -198359,14 +210751,14 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' 
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: found_inf
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   method_of:
   - Type
   - namespace
   mode: native
   python_module: ''
   returns:
@@ -198382,16 +210774,12 @@
     name: grads_out
     type: ::std::vector<at::Tensor>
   - dynamic_type: at::TensorList
-    field_name: exp_avgs_out
-    name: exp_avgs_out
-    type: ::std::vector<at::Tensor>
-  - dynamic_type: at::TensorList
-    field_name: exp_avg_sqs_out
-    name: exp_avg_sqs_out
+    field_name: state_sums_out
+    name: state_sums_out
     type: ::std::vector<at::Tensor>
   - dynamic_type: at::TensorList
-    field_name: max_exp_avg_sqs_out
-    name: max_exp_avg_sqs_out
+    field_name: state_steps_out
+    name: state_steps_out
     type: ::std::vector<at::Tensor>
   inplace: false
   is_factory_method: false
diff --git a/src/lantern/headers/src/main.cpp b/src/lantern/headers/src/main.cpp
index 99d0e7b892..0cca20691e 100644
--- a/src/lantern/headers/src/main.cpp
+++ b/src/lantern/headers/src/main.cpp
@@ -90,6 +90,41 @@ std::string addNamespace(std::string name) {
   return name;
 }
 
+std::string dtype2type (std::string dtype) {
+  if (dtype == "at::Tensor") return "Tensor";
+  if (dtype == "at::Stream") return "Stream";
+  if (dtype == "at::IntArrayRef") return "IntArrayRef";
+  if (dtype == "const at::Scalar &") return "Scalar";
+  if (dtype == "at::TensorList") return "TensorList";
+  if (dtype == "at::ScalarType") return "ScalarType";
+  if (dtype == "at::TensorOptions") return "TensorOptions";
+  if (dtype == "at::Dimname") return "Dimname";
+  if (dtype == "at::Generator") return "Generator";
+  if (dtype == "at::DimnameList") return "DimnameList";
+  if (dtype == "at::MemoryFormat") return "MemoryFormat";
+  if (dtype == "at::Device") return "Device";
+  if (dtype == "at::Layout") return "Layout";
+  if (dtype == "at::Storage") return "Storage";
+  if (dtype == "at::DeviceIndex") return "DeviceIndex";
+
+  if (dtype == "int64_t") return "int64_t";
+  if (dtype == "bool") return "bool_t";
+  if (dtype == "double") return "double_t";
+
+  if (dtype == "c10::string_view") return "string_view";
+
+  if (dtype == "at::ArrayRef<at::Scalar>") return "vector::Scalar";
+  if (dtype == "at::ArrayRef<double>") return "vector::double_t";
+  if (dtype == "::std::array<bool,2>") return "vector::bool_t";
+  if (dtype == "::std::array<bool,3>") return "vector::bool_t";
+  if (dtype == "::std::array<bool,4>") return "vector::bool_t";
+
+  if (dtype == "const c10::List<::std::optional<at::Tensor>> &") return "optional::TensorList";
+  if (dtype == "const at::ITensorListRef &") return "TensorList";
+
+  throw std::runtime_error("Unknown type " + dtype);
+}
+
 std::string buildCalls(std::string name, YAML::Node node, size_t start) {
   std::string arguments = "";
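The `dtype2type` helper introduced above, together with the `is_nullable` flag read in the rewritten `buildCalls` loop (next hunk), collapses roughly forty string-matched cases into one composition rule: map the declaration's `dynamic_type` to a lantern type name, then prefix `optional::` when the argument is nullable. A minimal, self-contained sketch of that rule follows; it assumes nothing beyond standard C++, `conversion_for` is a hypothetical name, and the mapping is a two-entry trim of the real table above, so treat it as illustration rather than the patch's code.

#include <iostream>
#include <stdexcept>
#include <string>

// Trimmed, two-entry stand-in for the dtype2type table above (illustration only).
std::string dtype2type(std::string dtype) {
  if (dtype == "at::Tensor") return "Tensor";
  if (dtype == "double") return "double_t";
  throw std::runtime_error("Unknown type " + dtype);
}

// How the generator composes one conversion call: the is_nullable flag from
// Declarations.yaml selects the from_raw::optional:: overload, and dtype2type
// supplies the type suffix.
std::string conversion_for(const std::string& dtype, bool is_nullable,
                           const std::string& arg_name) {
  return "from_raw::" + std::string(is_nullable ? "optional::" : "") +
         dtype2type(dtype) + "(" + arg_name + ")";
}

int main() {
  std::cout << conversion_for("at::Tensor", false, "self") << "\n";      // from_raw::Tensor(self)
  std::cout << conversion_for("at::Tensor", true, "found_inf") << "\n";  // from_raw::optional::Tensor(found_inf)
}

Only the list-like cases (nullable double arrays and lists of optional tensors) cannot be derived this way, which is why the patch keeps them as explicit branches in the hunk below.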
node[idx]["dynamic_type"].as(); + std::string type = node[idx]["type"].as(); + bool is_nullable = node[idx]["is_nullable"].as(); + std::string arg_name = node[idx]["name"].as(); - if (type == "Tensor") { - arguments += "from_raw::Tensor(" + call + ")"; - } else if (type == "TensorList") { - arguments += "from_raw::TensorList(" + call + ")"; - } else if (type == "ScalarType") { - arguments += "from_raw::ScalarType(" + call + ")"; - } else if (type == "Scalar") { - arguments += "from_raw::Scalar(" + call + ")"; - } else if (type == "TensorOptions") { - arguments += "from_raw::TensorOptions(" + call + ")"; - } else if (type == "Device") { - arguments += "from_raw::Device(" + call + ")"; - } else if (type == "Dimname") { - arguments += "from_raw::Dimname(" + call + ")"; - } else if (type == "DimnameList") { - arguments += "from_raw::DimnameList(" + call + ")"; - } else if (type == "c10::optional") { - arguments += "from_raw::optional::DimnameList(" + call + ")"; - } else if (type == "Generator") { - arguments += "from_raw::Generator(" + call + ")"; - } else if (type == "c10::optional") { - arguments += "from_raw::optional::Generator(" + call + ")"; - } else if (type == "IntArrayRef") { - arguments += "from_raw::IntArrayRef(" + call + ")"; - } else if (type == "Storage") { - arguments += "from_raw::Storage(" + call + ")"; - } else if (type == "std::string") { - arguments += "from_raw::string(" + call + ")"; - } else if (type == "int64_t") { - arguments += "from_raw::int64_t(" + call + ")"; - } else if (type == "bool") { - arguments += "from_raw::bool_t(" + call + ")"; - } else if (type == "double") { - arguments += "from_raw::double_t(" + call + ")"; - } else if (type == "c10::optional") { - arguments += "from_raw::optional::double_t(" + call + ")"; - } else if (type == "c10::optional") { - arguments += "from_raw::optional::int64_t(" + call + ")"; - } else if (type == "c10::optional") { - arguments += "from_raw::optional::bool_t(" + call + ")"; - } else if (type == "c10::optional") { - arguments += "from_raw::optional::Tensor(" + call + ")"; - } else if (type == "c10::optional") { - arguments += "from_raw::optional::ScalarType(" + call + ")"; - } else if (type == "::std::array" || type == "std::array" || - type == "std::array" || type == "::std::array" || - type == "::std::array") { - arguments += "from_raw::vector::bool_t(" + call + ")"; - } else if (type == "c10::optional") { - arguments += "from_raw::optional::string(" + call + ")"; - } else if (type == "c10::optional") { - arguments += "from_raw::optional::MemoryFormat(" + call + ")"; - } else if (type == "c10::optional") { - arguments += "from_raw::optional::Scalar(" + call + ")"; - } else if (type == "c10::List>") { - arguments += "from_raw::optional::TensorList(" + call + ")"; - } else if (type == "ArrayRef") { - arguments += "from_raw::vector::Scalar(" + call + ")"; - } else if (type == "c10::optional") { - arguments += "from_raw::optional::IntArrayRef(" + call + ")"; - } else if (type == "c10::optional>") { - arguments += "from_raw::optional::DoubleArrayRef(" + call + ")"; - } else if (type == "at::Stream") { - arguments += "from_raw::Stream(" + call + ")"; - } else if (type == "MemoryFormat") { - arguments += "from_raw::MemoryFormat(" + call + ")"; - } else if (type == "c10::string_view") { - arguments += "from_raw::string_view(" + call + ")"; - } else if (type == "c10::optional") { - arguments += "from_raw::optional::string_view(" + call + ")"; - } else if (type == "c10::optional") { - arguments += "from_raw::optional::Device(" + 
call + ")"; - } else if (type == "c10::SymIntArrayRef") { - arguments += "from_raw::SymIntArrayRef(" + call + ")"; - } else if (type == "c10::SymInt") { - arguments += "from_raw::SymInt(" + call + ")"; - } else if (type == "Layout") { - arguments += "from_raw::Layout(" + call + ")"; - } else if (type == "c10::optional") { - arguments += "from_raw::optional::Layout(" + call + ")"; - } else if (type == "const ITensorListRef &") { - arguments += "from_raw::TensorList(" + call + ")"; + std::string arg_call; + if (dtype == "at::ArrayRef" && is_nullable) { + arg_call = "from_raw::optional::DoubleArrayRef(" + arg_name + ")"; + } else if (dtype == "const c10::List<::std::optional> &") { + arg_call = "from_raw::optional::TensorList(" + arg_name + ")"; } else { - throw std::runtime_error("Unknown type " + type); - } + arg_call = "from_raw::" + (is_nullable ? std::string("optional::") : "") + + dtype2type(dtype) + "(" + arg_name + ")"; + }; - if (type == "std::shared_ptr") { - arguments += ".get()"; - } + arguments += arg_call; } return arguments; @@ -284,6 +219,10 @@ bool isSupported(YAML::Node node) { return false; } + if (name == "sym_size" || name == "sym_numel" || name == "sym_stride" || name == "sym_storage_offset") { + return false; + } + return true; } @@ -412,6 +351,10 @@ int main(int argc, char *argv[]) { buildArgumentsCalls(name, config[idx]["arguments"]); std::string function = toFunction(name, config[idx]["arguments"]); + if (name == "gradient") { + std::cout << "gradient!" << std::endl; + } + if (hasMethodOf(config[idx], "namespace")) { headers.push_back(" LANTERN_API void* (LANTERN_PTR _lantern_" + function + ")(" + arguments + ");"); diff --git a/src/lantern/include/lantern/lantern.h b/src/lantern/include/lantern/lantern.h index 863bfcafb1..62bc14bd21 100644 --- a/src/lantern/include/lantern/lantern.h +++ b/src/lantern/include/lantern/lantern.h @@ -6,6 +6,8 @@ #else #define WIN32_LEAN_AND_MEAN 1 #include +#undef max +#undef min #endif #ifndef HOST_API @@ -48,13 +50,13 @@ void check_lantern_loaded(); extern int lanternLogEnabled; #define LLOG(...) 
@@ -48,13 +50,13 @@ void check_lantern_loaded();
 extern int lanternLogEnabled;
 #define LLOG(...) \
   if ((lanternLogEnabled & 1) == 1) { \
-    printf("%ld INFO ", time(NULL)); \
+    printf("%lld INFO ", (long long)time(NULL)); \
     printf(__VA_ARGS__); \
     printf("\n"); \
   } \
   if ((lanternLogEnabled & 2) == 2) { \
     FILE *pFile = fopen("lantern.log", "a"); \
-    fprintf(pFile, "%ld INFO ", time(NULL)); \
+    fprintf(pFile, "%lld INFO ", (long long)time(NULL)); \
     fprintf(pFile, __VA_ARGS__); \
     fprintf(pFile, "\n"); \
     fclose(pFile); \
@@ -2343,7 +2345,7 @@ LANTERN_API void* (LANTERN_PTR _lantern_torch_show_config) ();
 HOST_API void* lantern_torch_show_config ()
 {
   LANTERN_CHECK_LOADED
-  auto ret = _lantern_torch_show_config();
+  void* ret = _lantern_torch_show_config();
   LANTERN_HOST_HANDLER;
   return ret;
 }
@@ -2352,7 +2354,7 @@ LANTERN_API void* (LANTERN_PTR _lantern_torch_parallel_info) ();
 HOST_API void* lantern_torch_parallel_info ()
 {
   LANTERN_CHECK_LOADED
-  auto ret = _lantern_torch_parallel_info();
+  void* ret = _lantern_torch_parallel_info();
   LANTERN_HOST_HANDLER;
   return ret;
 }
@@ -2658,8 +2660,28 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
   HOST_API void* lantern_align_tensors_tensorlist(void* tensors) { LANTERN_CHECK_LOADED void* ret = _lantern_align_tensors_tensorlist(tensors); LANTERN_HOST_HANDLER return ret; }
   LANTERN_API void* (LANTERN_PTR _lantern__assert_async_tensor)(void* self);
   HOST_API void* lantern__assert_async_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__assert_async_tensor(self); LANTERN_HOST_HANDLER return ret; }
+  LANTERN_API void* (LANTERN_PTR _lantern__assert_async_tensor_cstringview)(void* self, void* assert_msg);
+  HOST_API void* lantern__assert_async_tensor_cstringview(void* self, void* assert_msg) { LANTERN_CHECK_LOADED void* ret = _lantern__assert_async_tensor_cstringview(self, assert_msg); LANTERN_HOST_HANDLER return ret; }
+  LANTERN_API void* (LANTERN_PTR _lantern__assert_scalar_scalar_cstringview)(void* self, void* assert_msg);
+  HOST_API void* lantern__assert_scalar_scalar_cstringview(void* self, void* assert_msg) { LANTERN_CHECK_LOADED void* ret = _lantern__assert_scalar_scalar_cstringview(self, assert_msg); LANTERN_HOST_HANDLER return ret; }
+  LANTERN_API void* (LANTERN_PTR _lantern__functional_assert_scalar_scalar_cstringview_tensor)(void* self, void* assert_msg, void* dep_token);
+  HOST_API void* lantern__functional_assert_scalar_scalar_cstringview_tensor(void* self, void* assert_msg, void* dep_token) { LANTERN_CHECK_LOADED void* ret = _lantern__functional_assert_scalar_scalar_cstringview_tensor(self, assert_msg, dep_token); LANTERN_HOST_HANDLER return ret; }
+  LANTERN_API void* (LANTERN_PTR _lantern__functional_assert_async_tensor_cstringview_tensor)(void* self, void* assert_msg, void* dep_token);
+  HOST_API void* lantern__functional_assert_async_tensor_cstringview_tensor(void* self, void* assert_msg, void* dep_token) { LANTERN_CHECK_LOADED void* ret = _lantern__functional_assert_async_tensor_cstringview_tensor(self, assert_msg, dep_token); LANTERN_HOST_HANDLER return ret; }
   LANTERN_API void* (LANTERN_PTR _lantern__assert_tensor_metadata_tensor_intarrayref_intarrayref_scalartype)(void* a, void* size, void* stride, void* dtype);
   HOST_API void* lantern__assert_tensor_metadata_tensor_intarrayref_intarrayref_scalartype(void* a, void* size, void* stride, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__assert_tensor_metadata_tensor_intarrayref_intarrayref_scalartype(a, size, stride, dtype); LANTERN_HOST_HANDLER return ret; }
+  LANTERN_API void* (LANTERN_PTR _lantern__print_cstringview)(void* s);
+  HOST_API void* 
lantern__print_cstringview(void* s) { LANTERN_CHECK_LOADED void* ret = _lantern__print_cstringview(s); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_sym_constrain_range_scalar_intt_intt)(void* size, void* min, void* max); + HOST_API void* lantern_sym_constrain_range_scalar_intt_intt(void* size, void* min, void* max) { LANTERN_CHECK_LOADED void* ret = _lantern_sym_constrain_range_scalar_intt_intt(size, min, max); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_sym_constrain_range_for_size_scalar_intt_intt)(void* size, void* min, void* max); + HOST_API void* lantern_sym_constrain_range_for_size_scalar_intt_intt(void* size, void* min, void* max) { LANTERN_CHECK_LOADED void* ret = _lantern_sym_constrain_range_for_size_scalar_intt_intt(size, min, max); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__functional_sym_constrain_range_scalar_intt_intt_tensor)(void* size, void* min, void* max, void* dep_token); + HOST_API void* lantern__functional_sym_constrain_range_scalar_intt_intt_tensor(void* size, void* min, void* max, void* dep_token) { LANTERN_CHECK_LOADED void* ret = _lantern__functional_sym_constrain_range_scalar_intt_intt_tensor(size, min, max, dep_token); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__functional_sym_constrain_range_for_size_scalar_intt_intt_tensor)(void* size, void* min, void* max, void* dep_token); + HOST_API void* lantern__functional_sym_constrain_range_for_size_scalar_intt_intt_tensor(void* size, void* min, void* max, void* dep_token) { LANTERN_CHECK_LOADED void* ret = _lantern__functional_sym_constrain_range_for_size_scalar_intt_intt_tensor(size, min, max, dep_token); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__make_dep_token_tensoroptions_memoryformat)(void* options, void* memory_format); + HOST_API void* lantern__make_dep_token_tensoroptions_memoryformat(void* options, void* memory_format) { LANTERN_CHECK_LOADED void* ret = _lantern__make_dep_token_tensoroptions_memoryformat(options, memory_format); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_Tensor_refine_names_tensor_dimnamelist)(void* self, void* names); HOST_API void* lantern_Tensor_refine_names_tensor_dimnamelist(void* self, void* names) { void* ret = _lantern_Tensor_refine_names_tensor_dimnamelist(self, names); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__use_cudnn_ctc_loss_tensor_tensor_intarrayref_intarrayref_intt)(void* log_probs, void* targets, void* input_lengths, void* target_lengths, void* blank); @@ -2876,12 +2898,20 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_Tensor__is_any_true_tensor(void* self) { void* ret = _lantern_Tensor__is_any_true_tensor(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__test_check_tensor_tensor)(void* self); HOST_API void* lantern__test_check_tensor_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__test_check_tensor_tensor(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__test_functorch_fallback_tensor_tensor)(void* self, void* other); + HOST_API void* lantern__test_functorch_fallback_tensor_tensor(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__test_functorch_fallback_tensor_tensor(self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_all_tensor_intt_bool)(void* self, 
void* dim, void* keepdim); HOST_API void* lantern_all_tensor_intt_bool(void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_all_tensor_intt_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_Tensor_all_tensor_intt_bool)(void* self, void* dim, void* keepdim); HOST_API void* lantern_Tensor_all_tensor_intt_bool(void* self, void* dim, void* keepdim) { void* ret = _lantern_Tensor_all_tensor_intt_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_all_tensor_intarrayref_bool)(void* self, void* dim, void* keepdim); + HOST_API void* lantern_all_tensor_intarrayref_bool(void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_all_tensor_intarrayref_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_Tensor_all_tensor_intarrayref_bool)(void* self, void* dim, void* keepdim); + HOST_API void* lantern_Tensor_all_tensor_intarrayref_bool(void* self, void* dim, void* keepdim) { void* ret = _lantern_Tensor_all_tensor_intarrayref_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_all_out_tensor_tensor_intt_bool)(void* out, void* self, void* dim, void* keepdim); HOST_API void* lantern_all_out_tensor_tensor_intt_bool(void* out, void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_all_out_tensor_tensor_intt_bool(out, self, dim, keepdim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_all_out_tensor_tensor_intarrayref_bool)(void* out, void* self, void* dim, void* keepdim); + HOST_API void* lantern_all_out_tensor_tensor_intarrayref_bool(void* out, void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_all_out_tensor_tensor_intarrayref_bool(out, self, dim, keepdim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_all_tensor_dimname_bool)(void* self, void* dim, void* keepdim); HOST_API void* lantern_all_tensor_dimname_bool(void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_all_tensor_dimname_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_Tensor_all_tensor_dimname_bool)(void* self, void* dim, void* keepdim); @@ -2896,8 +2926,14 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_any_tensor_intt_bool(void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_any_tensor_intt_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_Tensor_any_tensor_intt_bool)(void* self, void* dim, void* keepdim); HOST_API void* lantern_Tensor_any_tensor_intt_bool(void* self, void* dim, void* keepdim) { void* ret = _lantern_Tensor_any_tensor_intt_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_any_tensor_intarrayref_bool)(void* self, void* dim, void* keepdim); + HOST_API void* lantern_any_tensor_intarrayref_bool(void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_any_tensor_intarrayref_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_Tensor_any_tensor_intarrayref_bool)(void* self, void* dim, void* keepdim); + HOST_API void* lantern_Tensor_any_tensor_intarrayref_bool(void* self, void* dim, void* keepdim) { void* ret = 
_lantern_Tensor_any_tensor_intarrayref_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_any_out_tensor_tensor_intt_bool)(void* out, void* self, void* dim, void* keepdim);
HOST_API void* lantern_any_out_tensor_tensor_intt_bool(void* out, void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_any_out_tensor_tensor_intt_bool(out, self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_any_out_tensor_tensor_intarrayref_bool)(void* out, void* self, void* dim, void* keepdim);
+ HOST_API void* lantern_any_out_tensor_tensor_intarrayref_bool(void* out, void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_any_out_tensor_tensor_intarrayref_bool(out, self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_any_tensor_dimname_bool)(void* self, void* dim, void* keepdim);
HOST_API void* lantern_any_tensor_dimname_bool(void* self, void* dim, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_any_tensor_dimname_bool(self, dim, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_any_tensor_dimname_bool)(void* self, void* dim, void* keepdim);
@@ -3122,6 +3158,10 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_Tensor_copysign__tensor_scalar(void* self, void* other) { void* ret = _lantern_Tensor_copysign__tensor_scalar(self, other); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_copysign_out_tensor_tensor_scalar)(void* out, void* self, void* other);
HOST_API void* lantern_copysign_out_tensor_tensor_scalar(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern_copysign_out_tensor_tensor_scalar(out, self, other); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__lazy_clone_tensor)(void* self);
+ HOST_API void* lantern__lazy_clone_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__lazy_clone_tensor(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_Tensor__lazy_clone_tensor)(void* self);
+ HOST_API void* lantern_Tensor__lazy_clone_tensor(void* self) { void* ret = _lantern_Tensor__lazy_clone_tensor(self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_logical_not_tensor)(void* self);
HOST_API void* lantern_logical_not_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_logical_not_tensor(self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_logical_not_tensor)(void* self);
@@ -3420,6 +3460,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_cudnn_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_tensor(void* input, void* grad_output, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_var, void* epsilon, void* reserveSpace) { LANTERN_CHECK_LOADED void* ret = _lantern_cudnn_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_tensor(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_cudnn_convolution_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)(void* self, void* weight, void* padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32);
HOST_API void* lantern_cudnn_convolution_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(void* self, void* weight, void* padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32) { LANTERN_CHECK_LOADED void* ret = _lantern_cudnn_convolution_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)(void* out, void* self, void* weight, void* padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32);
+ HOST_API void* lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(void* out, void* self, void* weight, void* padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32) { LANTERN_CHECK_LOADED void* ret = _lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(out, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_cudnn_convolution_transpose_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)(void* self, void* weight, void* padding, void* output_padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32);
HOST_API void* lantern_cudnn_convolution_transpose_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(void* self, void* weight, void* padding, void* output_padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32) { LANTERN_CHECK_LOADED void* ret = _lantern_cudnn_convolution_transpose_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__mps_convolution_transpose_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt)(void* self, void* weight, void* padding, void* output_padding, void* stride, void* dilation, void* groups);
@@ -3676,6 +3718,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_empty_intarrayref_dimnamelist_tensoroptions_memoryformat(void* size, void* names, void* options, void* memory_format) { LANTERN_CHECK_LOADED void* ret = _lantern_empty_intarrayref_dimnamelist_tensoroptions_memoryformat(size, names, options, memory_format); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_empty_intarrayref_tensoroptions_memoryformat)(void* size, void* options, void* memory_format);
HOST_API void* lantern_empty_intarrayref_tensoroptions_memoryformat(void* size, void* options, void* memory_format) { LANTERN_CHECK_LOADED void* ret = _lantern_empty_intarrayref_tensoroptions_memoryformat(size, options, memory_format); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_empty_permuted_intarrayref_intarrayref_tensoroptions)(void* size, void* physical_layout, void* options);
+ HOST_API void* lantern_empty_permuted_intarrayref_intarrayref_tensoroptions(void* size, void* physical_layout, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_empty_permuted_intarrayref_intarrayref_tensoroptions(size, physical_layout, options); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_new_empty_tensor_intarrayref_tensoroptions)(void* self, void* size, void* options);
HOST_API void* lantern_Tensor_new_empty_tensor_intarrayref_tensoroptions(void* self, void* size, void* options) { void* ret = _lantern_Tensor_new_empty_tensor_intarrayref_tensoroptions(self, size, options); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_new_empty_strided_tensor_intarrayref_intarrayref_tensoroptions)(void* self, void* size, void* stride, void* options);
@@ -3918,20 +3962,26 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern__fft_c2c_out_tensor_tensor_intarrayref_intt_bool(void* out, void* self, void* dim, void* normalization, void* forward) { LANTERN_CHECK_LOADED void* ret = _lantern__fft_c2c_out_tensor_tensor_intarrayref_intt_bool(out, self, dim, normalization, forward); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__validate_compressed_sparse_indices_bool_tensor_tensor_intt_intt_intt)(void* is_crow, void* compressed_idx, void* plain_idx, void* cdim, void* dim, void* nnz);
HOST_API void* lantern__validate_compressed_sparse_indices_bool_tensor_tensor_intt_intt_intt(void* is_crow, void* compressed_idx, void* plain_idx, void* cdim, void* dim, void* nnz) { LANTERN_CHECK_LOADED void* ret = _lantern__validate_compressed_sparse_indices_bool_tensor_tensor_intt_intt_intt(is_crow, compressed_idx, plain_idx, cdim, dim, nnz); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__cufft_get_plan_cache_size_intt)(void* device_index);
- HOST_API void* lantern__cufft_get_plan_cache_size_intt(void* device_index) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_get_plan_cache_size_intt(device_index); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__cufft_get_plan_cache_max_size_intt)(void* device_index);
- HOST_API void* lantern__cufft_get_plan_cache_max_size_intt(void* device_index) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_get_plan_cache_max_size_intt(device_index); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__cufft_set_plan_cache_max_size_intt_intt)(void* device_index, void* max_size);
- HOST_API void* lantern__cufft_set_plan_cache_max_size_intt_intt(void* device_index, void* max_size) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_set_plan_cache_max_size_intt_intt(device_index, max_size); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__cufft_clear_plan_cache_intt)(void* device_index);
- HOST_API void* lantern__cufft_clear_plan_cache_intt(void* device_index) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_clear_plan_cache_intt(device_index); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_index_tensor_constclistcoptionaltensor)(void* self, void* indices);
- HOST_API void* lantern_index_tensor_constclistcoptionaltensor(void* self, void* indices) { LANTERN_CHECK_LOADED void* ret = _lantern_index_tensor_constclistcoptionaltensor(self, indices); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_tensor_constclistcoptionaltensor)(void* self, void* indices);
- HOST_API void* lantern_Tensor_index_tensor_constclistcoptionaltensor(void* self, void* indices) { void* ret = _lantern_Tensor_index_tensor_constclistcoptionaltensor(self, indices); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_index_out_tensor_tensor_constclistcoptionaltensor)(void* out, void* self, void* indices);
- HOST_API void* lantern_index_out_tensor_tensor_constclistcoptionaltensor(void* out, void* self, void* indices) { LANTERN_CHECK_LOADED void* ret = _lantern_index_out_tensor_tensor_constclistcoptionaltensor(out, self, indices); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__cufft_get_plan_cache_size_deviceindex)(void* device_index);
+ HOST_API void* lantern__cufft_get_plan_cache_size_deviceindex(void* device_index) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_get_plan_cache_size_deviceindex(device_index); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__cufft_get_plan_cache_max_size_deviceindex)(void* device_index);
+ HOST_API void* lantern__cufft_get_plan_cache_max_size_deviceindex(void* device_index) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_get_plan_cache_max_size_deviceindex(device_index); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__cufft_set_plan_cache_max_size_deviceindex_intt)(void* device_index, void* max_size);
+ HOST_API void* lantern__cufft_set_plan_cache_max_size_deviceindex_intt(void* device_index, void* max_size) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_set_plan_cache_max_size_deviceindex_intt(device_index, max_size); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__cufft_clear_plan_cache_deviceindex)(void* device_index);
+ HOST_API void* lantern__cufft_clear_plan_cache_deviceindex(void* device_index) { LANTERN_CHECK_LOADED void* ret = _lantern__cufft_clear_plan_cache_deviceindex(device_index); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_index_tensor_constcliststdoptionaltensor)(void* self, void* indices);
+ HOST_API void* lantern_index_tensor_constcliststdoptionaltensor(void* self, void* indices) { LANTERN_CHECK_LOADED void* ret = _lantern_index_tensor_constcliststdoptionaltensor(self, indices); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_tensor_constcliststdoptionaltensor)(void* self, void* indices);
+ HOST_API void* lantern_Tensor_index_tensor_constcliststdoptionaltensor(void* self, void* indices) { void* ret = _lantern_Tensor_index_tensor_constcliststdoptionaltensor(self, indices); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_index_out_tensor_tensor_constcliststdoptionaltensor)(void* out, void* self, void* indices);
+ HOST_API void* lantern_index_out_tensor_tensor_constcliststdoptionaltensor(void* out, void* self, void* indices) { LANTERN_CHECK_LOADED void* ret = _lantern_index_out_tensor_tensor_constcliststdoptionaltensor(out, self, indices); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__unsafe_index_tensor_constcliststdoptionaltensor)(void* self, void* indices);
+ HOST_API void* lantern__unsafe_index_tensor_constcliststdoptionaltensor(void* self, void* indices) { LANTERN_CHECK_LOADED void* ret = _lantern__unsafe_index_tensor_constcliststdoptionaltensor(self, indices); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__unsafe_masked_index_tensor_tensor_constcliststdoptionaltensor_scalar)(void* self, void* mask, void* indices, void* fill);
+ HOST_API void* lantern__unsafe_masked_index_tensor_tensor_constcliststdoptionaltensor_scalar(void* self, void* mask, void* indices, void* fill) { LANTERN_CHECK_LOADED void* ret = _lantern__unsafe_masked_index_tensor_tensor_constcliststdoptionaltensor_scalar(self, mask, indices, fill); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__unsafe_masked_index_put_accumulate_tensor_tensor_constcliststdoptionaltensor_tensor)(void* self, void* mask, void* indices, void* values);
+ HOST_API void* lantern__unsafe_masked_index_put_accumulate_tensor_tensor_constcliststdoptionaltensor_tensor(void* self, void* mask, void* indices, void* values) { LANTERN_CHECK_LOADED void* ret = _lantern__unsafe_masked_index_put_accumulate_tensor_tensor_constcliststdoptionaltensor_tensor(self, mask, indices, values); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_index_copy_out_tensor_tensor_intt_tensor_tensor)(void* out, void* self, void* dim, void* index, void* source);
HOST_API void* lantern_index_copy_out_tensor_tensor_intt_tensor_tensor(void* out, void* self, void* dim, void* index, void* source) { LANTERN_CHECK_LOADED void* ret = _lantern_index_copy_out_tensor_tensor_intt_tensor_tensor(out, self, dim, index, source); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_copy__tensor_intt_tensor_tensor)(void* self, void* dim, void* index, void* source);
@@ -3946,16 +3996,18 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_index_copy_tensor_dimname_tensor_tensor(void* self, void* dim, void* index, void* source) { LANTERN_CHECK_LOADED void* ret = _lantern_index_copy_tensor_dimname_tensor_tensor(self, dim, index, source); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_copy_tensor_dimname_tensor_tensor)(void* self, void* dim, void* index, void* source);
HOST_API void* lantern_Tensor_index_copy_tensor_dimname_tensor_tensor(void* self, void* dim, void* index, void* source) { void* ret = _lantern_Tensor_index_copy_tensor_dimname_tensor_tensor(self, dim, index, source); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_index_put__tensor_constclistcoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
- HOST_API void* lantern_index_put__tensor_constclistcoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { LANTERN_CHECK_LOADED void* ret = _lantern_index_put__tensor_constclistcoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_put__tensor_constclistcoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
- HOST_API void* lantern_Tensor_index_put__tensor_constclistcoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { void* ret = _lantern_Tensor_index_put__tensor_constclistcoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_index_put_tensor_constclistcoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
- HOST_API void* lantern_index_put_tensor_constclistcoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { LANTERN_CHECK_LOADED void* ret = _lantern_index_put_tensor_constclistcoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_put_tensor_constclistcoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
- HOST_API void* lantern_Tensor_index_put_tensor_constclistcoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { void* ret = _lantern_Tensor_index_put_tensor_constclistcoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__index_put_impl__tensor_constclistcoptionaltensor_tensor_bool_bool)(void* self, void* indices, void* values, void* accumulate, void* unsafe);
- HOST_API void* lantern__index_put_impl__tensor_constclistcoptionaltensor_tensor_bool_bool(void* self, void* indices, void* values, void* accumulate, void* unsafe) { LANTERN_CHECK_LOADED void* ret = _lantern__index_put_impl__tensor_constclistcoptionaltensor_tensor_bool_bool(self, indices, values, accumulate, unsafe); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_index_put__tensor_constcliststdoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
+ HOST_API void* lantern_index_put__tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { LANTERN_CHECK_LOADED void* ret = _lantern_index_put__tensor_constcliststdoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_put__tensor_constcliststdoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
+ HOST_API void* lantern_Tensor_index_put__tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { void* ret = _lantern_Tensor_index_put__tensor_constcliststdoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_index_put_tensor_constcliststdoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
+ HOST_API void* lantern_index_put_tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { LANTERN_CHECK_LOADED void* ret = _lantern_index_put_tensor_constcliststdoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_Tensor_index_put_tensor_constcliststdoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
+ HOST_API void* lantern_Tensor_index_put_tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { void* ret = _lantern_Tensor_index_put_tensor_constcliststdoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__unsafe_index_put_tensor_constcliststdoptionaltensor_tensor_bool)(void* self, void* indices, void* values, void* accumulate);
+ HOST_API void* lantern__unsafe_index_put_tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { LANTERN_CHECK_LOADED void* ret = _lantern__unsafe_index_put_tensor_constcliststdoptionaltensor_tensor_bool(self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__index_put_impl__tensor_constcliststdoptionaltensor_tensor_bool_bool)(void* self, void* indices, void* values, void* accumulate, void* unsafe);
+ HOST_API void* lantern__index_put_impl__tensor_constcliststdoptionaltensor_tensor_bool_bool(void* self, void* indices, void* values, void* accumulate, void* unsafe) { LANTERN_CHECK_LOADED void* ret = _lantern__index_put_impl__tensor_constcliststdoptionaltensor_tensor_bool_bool(self, indices, values, accumulate, unsafe); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_instance_norm_tensor_tensor_tensor_tensor_tensor_bool_double_double_bool)(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* use_input_stats, void* momentum, void* eps, void* cudnn_enabled);
HOST_API void* lantern_instance_norm_tensor_tensor_tensor_tensor_tensor_bool_double_double_bool(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* use_input_stats, void* momentum, void* eps, void* cudnn_enabled) { LANTERN_CHECK_LOADED void* ret = _lantern_instance_norm_tensor_tensor_tensor_tensor_tensor_bool_double_double_bool(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_isclose_tensor_tensor_double_double_bool)(void* self, void* other, void* rtol, void* atol, void* equal_nan);
@@ -4048,6 +4100,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_native_layer_norm_tensor_intarrayref_tensor_tensor_double(void* input, void* normalized_shape, void* weight, void* bias, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern_native_layer_norm_tensor_intarrayref_tensor_tensor_double(input, normalized_shape, weight, bias, eps); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_native_layer_norm_backward_tensor_tensor_intarrayref_tensor_tensor_tensor_tensor_stdarraybool)(void* grad_out, void* input, void* normalized_shape, void* mean, void* rstd, void* weight, void* bias, void* output_mask);
HOST_API void* lantern_native_layer_norm_backward_tensor_tensor_intarrayref_tensor_tensor_tensor_tensor_stdarraybool(void* grad_out, void* input, void* normalized_shape, void* mean, void* rstd, void* weight, void* bias, void* output_mask) { LANTERN_CHECK_LOADED void* ret = _lantern_native_layer_norm_backward_tensor_tensor_intarrayref_tensor_tensor_tensor_tensor_stdarraybool(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_rms_norm_tensor_intarrayref_tensor_double)(void* input, void* normalized_shape, void* weight, void* eps);
+ HOST_API void* lantern_rms_norm_tensor_intarrayref_tensor_double(void* input, void* normalized_shape, void* weight, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern_rms_norm_tensor_intarrayref_tensor_double(input, normalized_shape, weight, eps); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_nan_to_num_tensor_double_double_double)(void* self, void* nan, void* posinf, void* neginf);
HOST_API void* lantern_nan_to_num_tensor_double_double_double(void* self, void* nan, void* posinf, void* neginf) { LANTERN_CHECK_LOADED void* ret = _lantern_nan_to_num_tensor_double_double_double(self, nan, posinf, neginf); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_nan_to_num_tensor_double_double_double)(void* self, void* nan, void* posinf, void* neginf);
@@ -4072,6 +4126,26 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_mkldnn_linear_backward_weights_tensor_tensor_tensor_bool(void* grad_output, void* input, void* weight, void* bias_defined) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_linear_backward_weights_tensor_tensor_tensor_bool(grad_output, input, weight, bias_defined); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_mkldnn_linear_backward_tensor_tensor_tensor_stdarraybool)(void* self, void* grad_output, void* weight, void* output_mask);
HOST_API void* lantern_mkldnn_linear_backward_tensor_tensor_tensor_stdarraybool(void* self, void* grad_output, void* weight, void* output_mask) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_linear_backward_tensor_tensor_tensor_stdarraybool(self, grad_output, weight, output_mask); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__cslt_compress_tensor)(void* input);
+ HOST_API void* lantern__cslt_compress_tensor(void* input) { LANTERN_CHECK_LOADED void* ret = _lantern__cslt_compress_tensor(input); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__cslt_sparse_mm_tensor_tensor_tensor_tensor_scalartype_bool_intt)(void* compressed_A, void* dense_B, void* bias, void* alpha, void* out_dtype, void* transpose_result, void* alg_id);
+ HOST_API void* lantern__cslt_sparse_mm_tensor_tensor_tensor_tensor_scalartype_bool_intt(void* compressed_A, void* dense_B, void* bias, void* alpha, void* out_dtype, void* transpose_result, void* alg_id) { LANTERN_CHECK_LOADED void* ret = _lantern__cslt_sparse_mm_tensor_tensor_tensor_tensor_scalartype_bool_intt(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result, alg_id); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__cslt_sparse_mm_search_tensor_tensor_tensor_tensor_scalartype_bool)(void* compressed_A, void* dense_B, void* bias, void* alpha, void* out_dtype, void* transpose_result);
+ HOST_API void* lantern__cslt_sparse_mm_search_tensor_tensor_tensor_tensor_scalartype_bool(void* compressed_A, void* dense_B, void* bias, void* alpha, void* out_dtype, void* transpose_result) { LANTERN_CHECK_LOADED void* ret = _lantern__cslt_sparse_mm_search_tensor_tensor_tensor_tensor_scalartype_bool(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__sparse_semi_structured_tile_tensor_cstringview_bool)(void* input, void* algorithm, void* use_cutlass);
+ HOST_API void* lantern__sparse_semi_structured_tile_tensor_cstringview_bool(void* input, void* algorithm, void* use_cutlass) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_semi_structured_tile_tensor_cstringview_bool(input, algorithm, use_cutlass); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__sparse_semi_structured_apply_tensor_tensor)(void* input, void* thread_masks);
+ HOST_API void* lantern__sparse_semi_structured_apply_tensor_tensor(void* input, void* thread_masks) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_semi_structured_apply_tensor_tensor(input, thread_masks); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__sparse_semi_structured_apply_dense_tensor_tensor)(void* input, void* thread_masks);
+ HOST_API void* lantern__sparse_semi_structured_apply_dense_tensor_tensor(void* input, void* thread_masks) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_semi_structured_apply_dense_tensor_tensor(input, thread_masks); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__sparse_semi_structured_linear_tensor_tensor_tensor_tensor_cstringview_scalartype)(void* input, void* weight, void* meta, void* bias, void* activation, void* out_dtype);
+ HOST_API void* lantern__sparse_semi_structured_linear_tensor_tensor_tensor_tensor_cstringview_scalartype(void* input, void* weight, void* meta, void* bias, void* activation, void* out_dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_semi_structured_linear_tensor_tensor_tensor_tensor_cstringview_scalartype(input, weight, meta, bias, activation, out_dtype); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__sparse_semi_structured_mm_tensor_tensor_tensor_scalartype)(void* mat1, void* mat1_meta, void* mat2, void* out_dtype);
+ HOST_API void* lantern__sparse_semi_structured_mm_tensor_tensor_tensor_scalartype(void* mat1, void* mat1_meta, void* mat2, void* out_dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_semi_structured_mm_tensor_tensor_tensor_scalartype(mat1, mat1_meta, mat2, out_dtype); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__sparse_semi_structured_addmm_tensor_tensor_tensor_tensor_scalar_scalar_scalartype)(void* input, void* mat1, void* mat1_meta, void* mat2, void* alpha, void* beta, void* out_dtype);
+ HOST_API void* lantern__sparse_semi_structured_addmm_tensor_tensor_tensor_tensor_scalar_scalar_scalartype(void* input, void* mat1, void* mat1_meta, void* mat2, void* alpha, void* beta, void* out_dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_semi_structured_addmm_tensor_tensor_tensor_tensor_scalar_scalar_scalartype(input, mat1, mat1_meta, mat2, alpha, beta, out_dtype); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__mixed_dtypes_linear_tensor_tensor_tensor_tensor_cstringview)(void* input, void* weight, void* scale, void* bias, void* activation);
+ HOST_API void* lantern__mixed_dtypes_linear_tensor_tensor_tensor_tensor_cstringview(void* input, void* weight, void* scale, void* bias, void* activation) { LANTERN_CHECK_LOADED void* ret = _lantern__mixed_dtypes_linear_tensor_tensor_tensor_tensor_cstringview(input, weight, scale, bias, activation); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_fbgemm_linear_int8_weight_fp32_activation_tensor_tensor_tensor_tensor_scalar_scalar_tensor)(void* input, void* weight, void* packed, void* col_offsets, void* weight_scale, void* weight_zero_point, void* bias);
HOST_API void* lantern_fbgemm_linear_int8_weight_fp32_activation_tensor_tensor_tensor_tensor_scalar_scalar_tensor(void* input, void* weight, void* packed, void* col_offsets, void* weight_scale, void* weight_zero_point, void* bias) { LANTERN_CHECK_LOADED void* ret = _lantern_fbgemm_linear_int8_weight_fp32_activation_tensor_tensor_tensor_tensor_scalar_scalar_tensor(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_fbgemm_linear_int8_weight_tensor_tensor_tensor_tensor_scalar_scalar_tensor)(void* input, void* weight, void* packed, void* col_offsets, void* weight_scale, void* weight_zero_point, void* bias);
@@ -4080,6 +4154,10 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_fbgemm_linear_quantize_weight_tensor(void* input) { LANTERN_CHECK_LOADED void* ret = _lantern_fbgemm_linear_quantize_weight_tensor(input); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_fbgemm_pack_gemm_matrix_fp16_tensor)(void* input);
HOST_API void* lantern_fbgemm_pack_gemm_matrix_fp16_tensor(void* input) { LANTERN_CHECK_LOADED void* ret = _lantern_fbgemm_pack_gemm_matrix_fp16_tensor(input); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__wrapped_linear_prepack_tensor_tensor_tensor_tensor)(void* weight, void* weight_scale, void* weight_zero_point, void* bias);
+ HOST_API void* lantern__wrapped_linear_prepack_tensor_tensor_tensor_tensor(void* weight, void* weight_scale, void* weight_zero_point, void* bias) { LANTERN_CHECK_LOADED void* ret = _lantern__wrapped_linear_prepack_tensor_tensor_tensor_tensor(weight, weight_scale, weight_zero_point, bias); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__wrapped_quantized_linear_prepacked_tensor_tensor_tensor_tensor_tensor_tensor_intt)(void* input, void* input_scale, void* input_zero_point, void* packed_weight, void* output_scale, void* output_zero_point, void* out_channel);
+ HOST_API void* lantern__wrapped_quantized_linear_prepacked_tensor_tensor_tensor_tensor_tensor_tensor_intt(void* input, void* input_scale, void* input_zero_point, void* packed_weight, void* output_scale, void* output_zero_point, void* out_channel) { LANTERN_CHECK_LOADED void* ret = _lantern__wrapped_quantized_linear_prepacked_tensor_tensor_tensor_tensor_tensor_tensor_intt(input, input_scale, input_zero_point, packed_weight, output_scale, output_zero_point, out_channel); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_fbgemm_linear_fp16_weight_fp32_activation_tensor_tensor_tensor)(void* input, void* packed_weight, void* bias);
HOST_API void* lantern_fbgemm_linear_fp16_weight_fp32_activation_tensor_tensor_tensor(void* input, void* packed_weight, void* bias) { LANTERN_CHECK_LOADED void* ret = _lantern_fbgemm_linear_fp16_weight_fp32_activation_tensor_tensor_tensor(input, packed_weight, bias); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_fbgemm_linear_fp16_weight_tensor_tensor_tensor)(void* input, void* packed_weight, void* bias);
@@ -4100,8 +4178,20 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_ldexp_out_tensor_tensor_tensor(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern_ldexp_out_tensor_tensor_tensor(out, self, other); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_linspace_scalar_scalar_intt_tensoroptions)(void* start, void* end, void* steps, void* options);
HOST_API void* lantern_linspace_scalar_scalar_intt_tensoroptions(void* start, void* end, void* steps, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_scalar_scalar_intt_tensoroptions(start, end, steps, options); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_linspace_tensor_tensor_intt_tensoroptions)(void* start, void* end, void* steps, void* options);
+ HOST_API void* lantern_linspace_tensor_tensor_intt_tensoroptions(void* start, void* end, void* steps, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_tensor_tensor_intt_tensoroptions(start, end, steps, options); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_linspace_tensor_scalar_intt_tensoroptions)(void* start, void* end, void* steps, void* options);
+ HOST_API void* lantern_linspace_tensor_scalar_intt_tensoroptions(void* start, void* end, void* steps, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_tensor_scalar_intt_tensoroptions(start, end, steps, options); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_linspace_scalar_tensor_intt_tensoroptions)(void* start, void* end, void* steps, void* options);
+ HOST_API void* lantern_linspace_scalar_tensor_intt_tensoroptions(void* start, void* end, void* steps, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_scalar_tensor_intt_tensoroptions(start, end, steps, options); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_linspace_out_tensor_scalar_scalar_intt)(void* out, void* start, void* end, void* steps);
HOST_API void* lantern_linspace_out_tensor_scalar_scalar_intt(void* out, void* start, void* end, void* steps) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_out_tensor_scalar_scalar_intt(out, start, end, steps); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_linspace_out_tensor_tensor_tensor_intt)(void* out, void* start, void* end, void* steps);
+ HOST_API void* lantern_linspace_out_tensor_tensor_tensor_intt(void* out, void* start, void* end, void* steps) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_out_tensor_tensor_tensor_intt(out, start, end, steps); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_linspace_out_tensor_tensor_scalar_intt)(void* out, void* start, void* end, void* steps);
+ HOST_API void* lantern_linspace_out_tensor_tensor_scalar_intt(void* out, void* start, void* end, void* steps) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_out_tensor_tensor_scalar_intt(out, start, end, steps); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_linspace_out_tensor_scalar_tensor_intt)(void* out, void* start, void* end, void* steps);
+ HOST_API void* lantern_linspace_out_tensor_scalar_tensor_intt(void* out, void* start, void* end, void* steps) { LANTERN_CHECK_LOADED void* ret = _lantern_linspace_out_tensor_scalar_tensor_intt(out, start, end, steps); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_log_tensor)(void* self);
HOST_API void* lantern_log_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_log_tensor(self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_log_tensor)(void* self);
@@ -4180,8 +4270,20 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_xlogy_out_tensor_tensor_scalar(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern_xlogy_out_tensor_tensor_scalar(out, self, other); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_logspace_scalar_scalar_intt_double_tensoroptions)(void* start, void* end, void* steps, void* base, void* options);
HOST_API void* lantern_logspace_scalar_scalar_intt_double_tensoroptions(void* start, void* end, void* steps, void* base, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_scalar_scalar_intt_double_tensoroptions(start, end, steps, base, options); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_logspace_tensor_tensor_intt_double_tensoroptions)(void* start, void* end, void* steps, void* base, void* options);
+ HOST_API void* lantern_logspace_tensor_tensor_intt_double_tensoroptions(void* start, void* end, void* steps, void* base, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_tensor_tensor_intt_double_tensoroptions(start, end, steps, base, options); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_logspace_tensor_scalar_intt_double_tensoroptions)(void* start, void* end, void* steps, void* base, void* options);
+ HOST_API void* lantern_logspace_tensor_scalar_intt_double_tensoroptions(void* start, void* end, void* steps, void* base, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_tensor_scalar_intt_double_tensoroptions(start, end, steps, base, options); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_logspace_scalar_tensor_intt_double_tensoroptions)(void* start, void* end, void* steps, void* base, void* options);
+ HOST_API void* lantern_logspace_scalar_tensor_intt_double_tensoroptions(void* start, void* end, void* steps, void* base, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_scalar_tensor_intt_double_tensoroptions(start, end, steps, base, options); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_logspace_out_tensor_scalar_scalar_intt_double)(void* out, void* start, void* end, void* steps, void* base);
HOST_API void* lantern_logspace_out_tensor_scalar_scalar_intt_double(void* out, void* start, void* end, void* steps, void* base) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_out_tensor_scalar_scalar_intt_double(out, start, end, steps, base); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_logspace_out_tensor_tensor_tensor_intt_double)(void* out, void* start, void* end, void* steps, void* base);
+ HOST_API void* lantern_logspace_out_tensor_tensor_tensor_intt_double(void* out, void* start, void* end, void* steps, void* base) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_out_tensor_tensor_tensor_intt_double(out, start, end, steps, base); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_logspace_out_tensor_tensor_scalar_intt_double)(void* out, void* start, void* end, void* steps, void* base);
+ HOST_API void* lantern_logspace_out_tensor_tensor_scalar_intt_double(void* out, void* start, void* end, void* steps, void* base) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_out_tensor_tensor_scalar_intt_double(out, start, end, steps, base); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_logspace_out_tensor_scalar_tensor_intt_double)(void* out, void* start, void* end, void* steps, void* base);
+ HOST_API void* lantern_logspace_out_tensor_scalar_tensor_intt_double(void* out, void* start, void* end, void* steps, void* base) { LANTERN_CHECK_LOADED void* ret = _lantern_logspace_out_tensor_scalar_tensor_intt_double(out, start, end, steps, base); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_log_softmax_tensor_intt_scalartype)(void* self, void* dim, void* dtype);
HOST_API void* lantern_log_softmax_tensor_intt_scalartype(void* self, void* dim, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_log_softmax_tensor_intt_scalartype(self, dim, dtype); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_log_softmax_tensor_intt_scalartype)(void* self, void* dim, void* dtype);
@@ -4304,12 +4406,16 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_quantized_max_pool1d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_CHECK_LOADED void* ret = _lantern_quantized_max_pool1d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(self, kernel_size, stride, padding, dilation, ceil_mode); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_quantized_max_pool2d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode);
HOST_API void* lantern_quantized_max_pool2d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_CHECK_LOADED void* ret = _lantern_quantized_max_pool2d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(self, kernel_size, stride, padding, dilation, ceil_mode); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_quantized_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode);
+ HOST_API void* lantern_quantized_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_CHECK_LOADED void* ret = _lantern_quantized_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(self, kernel_size, stride, padding, dilation, ceil_mode); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode);
HOST_API void* lantern_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_CHECK_LOADED void* ret = _lantern_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(self, kernel_size, stride, padding, dilation, ceil_mode); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_mean_tensor_scalartype)(void* self, void* dtype);
HOST_API void* lantern_mean_tensor_scalartype(void* self, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_mean_tensor_scalartype(self, dtype); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_mean_tensor_scalartype)(void* self, void* dtype);
HOST_API void* lantern_Tensor_mean_tensor_scalartype(void* self, void* dtype) { void* ret = _lantern_Tensor_mean_tensor_scalartype(self, dtype); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_mean_out_tensor_tensor_scalartype)(void* out, void* self, void* dtype);
+ HOST_API void* lantern_mean_out_tensor_tensor_scalartype(void* out, void* self, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_mean_out_tensor_tensor_scalartype(out, self, dtype); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_mean_tensor_intarrayref_bool_scalartype)(void* self, void* dim, void* keepdim, void* dtype);
HOST_API void* lantern_mean_tensor_intarrayref_bool_scalartype(void* self, void* dim, void* keepdim, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_mean_tensor_intarrayref_bool_scalartype(self, dim, keepdim, dtype); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_mean_tensor_intarrayref_bool_scalartype)(void* self, void* dim, void* keepdim, void* dtype);
@@ -4412,6 +4518,16 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_Tensor_mm_tensor_tensor(void* self, void* mat2) { void* ret = _lantern_Tensor_mm_tensor_tensor(self, mat2); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_mm_out_tensor_tensor_tensor)(void* out, void* self, void* mat2);
HOST_API void* lantern_mm_out_tensor_tensor_tensor(void* out, void* self, void* mat2) { LANTERN_CHECK_LOADED void* ret = _lantern_mm_out_tensor_tensor_tensor(out, self, mat2); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__int_mm_tensor_tensor)(void* self, void* mat2);
+ HOST_API void* lantern__int_mm_tensor_tensor(void* self, void* mat2) { LANTERN_CHECK_LOADED void* ret = _lantern__int_mm_tensor_tensor(self, mat2); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__int_mm_out_tensor_tensor_tensor)(void* out, void* self, void* mat2);
+ HOST_API void* lantern__int_mm_out_tensor_tensor_tensor(void* out, void* self, void* mat2) { LANTERN_CHECK_LOADED void* ret = _lantern__int_mm_out_tensor_tensor_tensor(out, self, mat2); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__convert_weight_to_int4pack_tensor_intt)(void* self, void* innerKTiles);
+ HOST_API void* lantern__convert_weight_to_int4pack_tensor_intt(void* self, void* innerKTiles) { LANTERN_CHECK_LOADED void* ret = _lantern__convert_weight_to_int4pack_tensor_intt(self, innerKTiles); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__weight_int4pack_mm_tensor_tensor_intt_tensor)(void* self, void* mat2, void* qGroupSize, void* qScaleAndZeros);
+ HOST_API void* lantern__weight_int4pack_mm_tensor_tensor_intt_tensor(void* self, void* mat2, void* qGroupSize, void* qScaleAndZeros) { LANTERN_CHECK_LOADED void* ret = _lantern__weight_int4pack_mm_tensor_tensor_intt_tensor(self, mat2, qGroupSize, qScaleAndZeros); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__weight_int8pack_mm_tensor_tensor_tensor)(void* self, void* mat2, void* scales);
+ HOST_API void* lantern__weight_int8pack_mm_tensor_tensor_tensor(void* self, void* mat2, void* scales) { LANTERN_CHECK_LOADED void* ret = _lantern__weight_int8pack_mm_tensor_tensor_tensor(self, mat2, scales); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__sparse_mm_tensor_tensor)(void* sparse, void* dense);
HOST_API void* lantern__sparse_mm_tensor_tensor(void* sparse, void* dense) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_mm_tensor_tensor(sparse, dense); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__sparse_mm_tensor_tensor_cstringview)(void* sparse, void* dense, void* reduce);
@@ -4492,6 +4608,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_native_batch_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double(void* out, void* save_mean, void* save_invstd, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern_native_batch_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double(out, save_mean, save_invstd, input, weight, bias, running_mean, running_var, training, momentum, eps); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__native_batch_norm_legit_tensor_tensor_tensor_tensor_tensor_bool_double_double)(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* momentum, void* eps);
HOST_API void* lantern__native_batch_norm_legit_tensor_tensor_tensor_tensor_tensor_bool_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__native_batch_norm_legit_tensor_tensor_tensor_tensor_tensor_bool_double_double(input, weight, bias, running_mean, running_var, training, momentum, eps); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__native_batch_norm_legit_no_training_tensor_tensor_tensor_tensor_tensor_double_double)(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps);
+ HOST_API void* lantern__native_batch_norm_legit_no_training_tensor_tensor_tensor_tensor_tensor_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__native_batch_norm_legit_no_training_tensor_tensor_tensor_tensor_tensor_double_double(input, weight, bias, running_mean, running_var, momentum, eps); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__native_batch_norm_legit_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double)(void* out, void* save_mean, void* save_invstd, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* momentum, void* eps);
HOST_API void* lantern__native_batch_norm_legit_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double(void* out, void* save_mean, void* save_invstd, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__native_batch_norm_legit_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double(out, save_mean, save_invstd, input, weight, bias, running_mean, running_var, training, momentum, eps); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__native_batch_norm_legit_tensor_tensor_tensor_bool_double_double)(void* input, void* weight, void* bias, void* training, void* momentum, void* eps);
@@ -4512,8 +4630,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_native_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool(void* grad_out, void* input, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_invstd, void* train, void* eps, void* output_mask) { LANTERN_CHECK_LOADED void* ret = _lantern_native_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_backward_reduce_tensor_tensor_tensor_tensor_tensor_bool_bool_bool)(void* grad_out, void* input, void* mean, void* invstd, void* weight, void* input_g, void* weight_g, void* bias_g);
HOST_API void* lantern_batch_norm_backward_reduce_tensor_tensor_tensor_tensor_tensor_bool_bool_bool(void* grad_out, void* input, void* mean, void* invstd, void* weight, void* input_g, void* weight_g, void* bias_g) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_backward_reduce_tensor_tensor_tensor_tensor_tensor_bool_bool_bool(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor)(void* grad_out, void* input, void* mean, void* invstd, void* weight, void* mean_dy, void* mean_dy_xmu, void* count);
- HOST_API void* lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* grad_out, void* input, void* mean, void* invstd, void* weight, void* mean_dy, void* mean_dy_xmu, void* count) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor)(void* grad_out, void* input, void* mean, void* invstd, void* weight, void* sum_dy, void* sum_dy_xmu, void* count);
+ HOST_API void* lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* grad_out, void* input, void* mean, void* invstd, void* weight, void* sum_dy, void* sum_dy_xmu, void* count) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_update_stats_tensor_tensor_tensor_double)(void* input, void* running_mean, void* running_var, void* momentum);
HOST_API void* lantern_batch_norm_update_stats_tensor_tensor_tensor_double(void* input, void* running_mean, void* running_var, void* momentum) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_update_stats_tensor_tensor_tensor_double(input, running_mean, running_var, momentum); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_is_vulkan_available)();
@@ -4932,6 +5050,10 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_Tensor_slice_tensor_intt_intt_intt_intt(void* self, void* dim, void* start, void* end, void* step) { void* ret = _lantern_Tensor_slice_tensor_intt_intt_intt_intt(self, dim, start, end, step); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_slice_backward_tensor_intarrayref_intt_intt_intt_intt)(void* grad_output, void* input_sizes, void* dim, void* start, void* end, void* step);
HOST_API void* lantern_slice_backward_tensor_intarrayref_intt_intt_intt_intt(void* grad_output, void* input_sizes, void* dim, void* start, void* end, void* step) { LANTERN_CHECK_LOADED void* ret = _lantern_slice_backward_tensor_intarrayref_intt_intt_intt_intt(grad_output, input_sizes, dim, start, end, step); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_slice_inverse_tensor_tensor_intt_intt_intt_intt)(void* self, void* src, void* dim, void* start, void* end, void* step);
+ HOST_API void* lantern_slice_inverse_tensor_tensor_intt_intt_intt_intt(void* self, void* src, void* dim, void* start, void* end, void* step) { LANTERN_CHECK_LOADED void* ret = _lantern_slice_inverse_tensor_tensor_intt_intt_intt_intt(self, src, dim, start, end, step); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_Tensor_slice_inverse_tensor_tensor_intt_intt_intt_intt)(void* self, void* src, void* dim, void* start, void* end, void* step);
+ HOST_API void* lantern_Tensor_slice_inverse_tensor_tensor_intt_intt_intt_intt(void* self, void* src, void* dim, void* start, void* end, void* step) { void* ret = _lantern_Tensor_slice_inverse_tensor_tensor_intt_intt_intt_intt(self, src, dim, start, end, step); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_slice_scatter_tensor_tensor_intt_intt_intt_intt)(void* self, void* src, void* dim, void* start, void* end, void* step);
HOST_API void* lantern_slice_scatter_tensor_tensor_intt_intt_intt_intt(void* self, void* src, void* dim, void* start, void* end, void* step) { LANTERN_CHECK_LOADED void* ret = _lantern_slice_scatter_tensor_tensor_intt_intt_intt_intt(self, src, dim, start, end, step); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_slice_scatter_tensor_tensor_intt_intt_intt_intt)(void* self, void* src, void* dim, void* start, void* end, void* step);
@@ -5044,6 +5166,10 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_Tensor_sspaddmm_tensor_tensor_tensor_scalar_scalar(void* self, void* mat1, void* mat2, void* beta, void* alpha) { void* ret = _lantern_Tensor_sspaddmm_tensor_tensor_tensor_scalar_scalar(self, mat1, mat2, beta, alpha); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_sspaddmm_out_tensor_tensor_tensor_tensor_scalar_scalar)(void* out, void* self, void* mat1, void* mat2, void* beta, void* alpha);
HOST_API void* lantern_sspaddmm_out_tensor_tensor_tensor_tensor_scalar_scalar(void* out, void* self, void* mat1, void* mat2, void* beta, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern_sspaddmm_out_tensor_tensor_tensor_tensor_scalar_scalar(out, self, mat1, mat2, beta, alpha); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__chunk_cat_tensorlist_intt_intt)(void* tensors, void* dim, void* num_chunks);
+ HOST_API void* lantern__chunk_cat_tensorlist_intt_intt(void* tensors, void* dim, void* num_chunks) { LANTERN_CHECK_LOADED void* ret = _lantern__chunk_cat_tensorlist_intt_intt(tensors, dim, num_chunks); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__chunk_cat_out_tensor_tensorlist_intt_intt)(void* out, void* tensors, void* dim, void* num_chunks);
+ HOST_API void* lantern__chunk_cat_out_tensor_tensorlist_intt_intt(void* out, void* tensors, void* dim, void* num_chunks) { LANTERN_CHECK_LOADED void* ret = _lantern__chunk_cat_out_tensor_tensorlist_intt_intt(out, tensors, dim, num_chunks); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_stack_tensorlist_intt)(void* tensors, void* dim);
HOST_API void* lantern_stack_tensorlist_intt(void* tensors, void* dim) { LANTERN_CHECK_LOADED void* ret = _lantern_stack_tensorlist_intt(tensors, dim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_stack_out_tensor_tensorlist_intt)(void* out, void* tensors, void* dim);
@@ -5138,36 +5264,36 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_std_tensor_intarrayref_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_tensor_intarrayref_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_std_tensor_intarrayref_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
HOST_API void* lantern_Tensor_std_tensor_intarrayref_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { void* ret = _lantern_Tensor_std_tensor_intarrayref_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_std_tensor_intarrayref_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
- HOST_API void* lantern_std_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_tensor_intarrayref_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_Tensor_std_tensor_intarrayref_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
- HOST_API void* lantern_Tensor_std_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_std_tensor_intarrayref_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_std_tensor_intarrayref_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+ HOST_API void* lantern_std_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_tensor_intarrayref_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_Tensor_std_tensor_intarrayref_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+ HOST_API void* lantern_Tensor_std_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_std_tensor_intarrayref_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_std_mean_tensor_bool)(void* self, void* unbiased);
HOST_API void* lantern_std_mean_tensor_bool(void* self, void* unbiased) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_tensor_bool(self, unbiased); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_std_mean_tensor_intarrayref_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
HOST_API void* lantern_std_mean_tensor_intarrayref_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_tensor_intarrayref_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_std_mean_tensor_intarrayref_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
- HOST_API void* lantern_std_mean_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_tensor_intarrayref_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_std_mean_tensor_intarrayref_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+ HOST_API void* lantern_std_mean_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_tensor_intarrayref_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_std_mean_tensor_dimnamelist_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
HOST_API void* lantern_std_mean_tensor_dimnamelist_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_tensor_dimnamelist_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_std_mean_tensor_dimnamelist_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
- HOST_API void* lantern_std_mean_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_tensor_dimnamelist_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_std_mean_tensor_dimnamelist_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+ HOST_API void* lantern_std_mean_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_tensor_dimnamelist_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_std_out_tensor_tensor_intarrayref_bool_bool)(void* out, void* self, void* dim, void* unbiased, void* keepdim);
HOST_API void* lantern_std_out_tensor_tensor_intarrayref_bool_bool(void* out, void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_out_tensor_tensor_intarrayref_bool_bool(out, self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_std_out_tensor_tensor_intarrayref_intt_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
- HOST_API void* lantern_std_out_tensor_tensor_intarrayref_intt_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_out_tensor_tensor_intarrayref_intt_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_std_out_tensor_tensor_intarrayref_scalar_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
+ HOST_API void* lantern_std_out_tensor_tensor_intarrayref_scalar_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_out_tensor_tensor_intarrayref_scalar_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_std_tensor_dimnamelist_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
HOST_API void* lantern_std_tensor_dimnamelist_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_tensor_dimnamelist_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_std_tensor_dimnamelist_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
HOST_API void* lantern_Tensor_std_tensor_dimnamelist_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { void* ret = _lantern_Tensor_std_tensor_dimnamelist_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_std_out_tensor_tensor_dimnamelist_bool_bool)(void* out, void* self, void* dim, void* unbiased, void* keepdim);
HOST_API void* lantern_std_out_tensor_tensor_dimnamelist_bool_bool(void* out, void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_out_tensor_tensor_dimnamelist_bool_bool(out, self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_std_tensor_dimnamelist_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
- HOST_API void* lantern_std_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_tensor_dimnamelist_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_Tensor_std_tensor_dimnamelist_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
- HOST_API void* lantern_Tensor_std_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_std_tensor_dimnamelist_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_std_out_tensor_tensor_dimnamelist_intt_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
- HOST_API void* lantern_std_out_tensor_tensor_dimnamelist_intt_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_out_tensor_tensor_dimnamelist_intt_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_std_tensor_dimnamelist_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+ HOST_API void* lantern_std_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_tensor_dimnamelist_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_Tensor_std_tensor_dimnamelist_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+ HOST_API void* lantern_Tensor_std_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_std_tensor_dimnamelist_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern_std_out_tensor_tensor_dimnamelist_scalar_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
+ HOST_API void* lantern_std_out_tensor_tensor_dimnamelist_scalar_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_out_tensor_tensor_dimnamelist_scalar_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_prod_tensor_scalartype)(void* self, void* dtype);
HOST_API void* lantern_prod_tensor_scalartype(void* self, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_prod_tensor_scalartype(self, dtype); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor_prod_tensor_scalartype)(void* self, void* dtype);
@@ -5284,14 +5410,36 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_Tensor__nested_tensor_size_tensor(void* self) { void* ret = _lantern_Tensor__nested_tensor_size_tensor(self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_Tensor__nested_tensor_strides_tensor)(void* self);
HOST_API void* lantern_Tensor__nested_tensor_strides_tensor(void* self) { void* ret = _lantern_Tensor__nested_tensor_strides_tensor(self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR
-LANTERN_API void* (LANTERN_PTR _lantern_Tensor__nested_tensor_offsets_tensor)(void* self);
-HOST_API void* lantern_Tensor__nested_tensor_offsets_tensor(void* self) { void* ret = _lantern_Tensor__nested_tensor_offsets_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__nested_tensor_storage_offsets_tensor)(void* self);
+HOST_API void* lantern_Tensor__nested_tensor_storage_offsets_tensor(void* self) { void* ret = _lantern_Tensor__nested_tensor_storage_offsets_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__nested_from_padded_and_nested_example_tensor_tensor)(void* padded, void* nt_example);
 HOST_API void* lantern__nested_from_padded_and_nested_example_tensor_tensor(void* padded, void* nt_example) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_from_padded_and_nested_example_tensor_tensor(padded, nt_example); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_buffer_tensor_tensor_tensor_intarrayref)(void* self, void* nested_size, void* nested_strides, void* offsets);
-HOST_API void* lantern__nested_view_from_buffer_tensor_tensor_tensor_intarrayref(void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_buffer_tensor_tensor_tensor_intarrayref(self, nested_size, nested_strides, offsets); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_intarrayref)(void* self, void* nested_size, void* nested_strides, void* offsets);
-HOST_API void* lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_intarrayref(void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_intarrayref(self, nested_size, nested_strides, offsets); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_buffer_tensor_tensor_tensor_tensor)(void* self, void* nested_size, void* nested_strides, void* offsets);
+HOST_API void* lantern__nested_view_from_buffer_tensor_tensor_tensor_tensor(void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_buffer_tensor_tensor_tensor_tensor(self, nested_size, nested_strides, offsets); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_tensor)(void* self, void* nested_size, void* nested_strides, void* offsets);
+HOST_API void* lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_tensor(void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_tensor(self, nested_size, nested_strides, offsets); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_jagged_tensor_tensor_tensor_tensor_intt_tensor_tensor)(void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen);
+HOST_API void* lantern__nested_view_from_jagged_tensor_tensor_tensor_tensor_intt_tensor_tensor(void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_jagged_tensor_tensor_tensor_tensor_intt_tensor_tensor(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_jagged_copy_tensor_tensor_tensor_tensor_intt_tensor_tensor)(void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen);
+HOST_API void* lantern__nested_view_from_jagged_copy_tensor_tensor_tensor_tensor_intt_tensor_tensor(void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_jagged_copy_tensor_tensor_tensor_tensor_intt_tensor_tensor(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_values_tensor)(void* self);
+HOST_API void* lantern__nested_get_values_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_values_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_values_copy_tensor)(void* self);
+HOST_API void* lantern__nested_get_values_copy_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_values_copy_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_offsets_tensor)(void* self);
+HOST_API void* lantern__nested_get_offsets_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_offsets_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_lengths_tensor)(void* self);
+HOST_API void* lantern__nested_get_lengths_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_lengths_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_ragged_idx_tensor)(void* self);
+HOST_API void* lantern__nested_get_ragged_idx_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_ragged_idx_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_min_seqlen_tensor)(void* self);
+HOST_API void* lantern__nested_get_min_seqlen_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_min_seqlen_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_max_seqlen_tensor)(void* self);
+HOST_API void* lantern__nested_get_max_seqlen_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_max_seqlen_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_get_jagged_dummy_tensor)(void* any);
+HOST_API void* lantern__nested_get_jagged_dummy_tensor(void* any) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_jagged_dummy_tensor(any); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__nested_compute_contiguous_strides_offsets_tensor)(void* nested_size);
+HOST_API void* lantern__nested_compute_contiguous_strides_offsets_tensor(void* nested_size) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_compute_contiguous_strides_offsets_tensor(nested_size); LANTERN_HOST_HANDLER return ret; }
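
The hunk above regenerates the nested-tensor bindings for the newer LibTorch: `_nested_tensor_offsets` becomes `_nested_tensor_storage_offsets`, the buffer-view constructors now take tensor offsets instead of an `IntArrayRef`, and a family of jagged-layout accessors (`_nested_get_values`, `_nested_get_offsets`, `_nested_get_lengths`, ...) is added. Every entry follows the same generated pattern: a `LANTERN_PTR` function pointer plus a `HOST_API` forwarder guarded by `LANTERN_CHECK_LOADED`/`LANTERN_HOST_HANDLER`. A minimal sketch of how such pointers are resolved at load time (illustrative only; the real loader lives elsewhere in lantern, and `load_nested_symbols` is a hypothetical helper, POSIX-only for brevity):

    #include <dlfcn.h>
    // Symbol name matches the declarations above; the loader itself is a sketch.
    static void* (*nested_get_values)(void* self);
    bool load_nested_symbols(void* lib /* from dlopen("liblantern.so", RTLD_NOW) */) {
      nested_get_values = reinterpret_cast<void* (*)(void*)>(
          dlsym(lib, "_lantern__nested_get_values_tensor"));
      return nested_get_values != nullptr;  // fail fast if the symbol is missing
    }
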
 LANTERN_API void* (LANTERN_PTR _lantern__trilinear_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt)(void* i1, void* i2, void* i3, void* expand1, void* expand2, void* expand3, void* sumdim, void* unroll_dim);
 HOST_API void* lantern__trilinear_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt(void* i1, void* i2, void* i3, void* expand1, void* expand2, void* expand3, void* sumdim, void* unroll_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__trilinear_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_triplet_margin_loss_tensor_tensor_tensor_double_double_double_bool_intt)(void* anchor, void* positive, void* negative, void* margin, void* p, void* eps, void* swap, void* reduction);
@@ -5348,36 +5496,36 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_var_tensor_intarrayref_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_tensor_intarrayref_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_var_tensor_intarrayref_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_Tensor_var_tensor_intarrayref_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { void* ret = _lantern_Tensor_var_tensor_intarrayref_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_var_tensor_intarrayref_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_var_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_tensor_intarrayref_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_Tensor_var_tensor_intarrayref_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_Tensor_var_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_var_tensor_intarrayref_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_var_tensor_intarrayref_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_var_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_tensor_intarrayref_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor_var_tensor_intarrayref_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_Tensor_var_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_var_tensor_intarrayref_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_var_out_tensor_tensor_intarrayref_bool_bool)(void* out, void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_var_out_tensor_tensor_intarrayref_bool_bool(void* out, void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_out_tensor_tensor_intarrayref_bool_bool(out, self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_var_out_tensor_tensor_intarrayref_intt_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_var_out_tensor_tensor_intarrayref_intt_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_out_tensor_tensor_intarrayref_intt_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_var_out_tensor_tensor_intarrayref_scalar_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_var_out_tensor_tensor_intarrayref_scalar_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_out_tensor_tensor_intarrayref_scalar_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_var_tensor_dimnamelist_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_var_tensor_dimnamelist_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_tensor_dimnamelist_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_var_tensor_dimnamelist_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_Tensor_var_tensor_dimnamelist_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { void* ret = _lantern_Tensor_var_tensor_dimnamelist_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_var_out_tensor_tensor_dimnamelist_bool_bool)(void* out, void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_var_out_tensor_tensor_dimnamelist_bool_bool(void* out, void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_out_tensor_tensor_dimnamelist_bool_bool(out, self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_var_tensor_dimnamelist_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_var_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_tensor_dimnamelist_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_Tensor_var_tensor_dimnamelist_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_Tensor_var_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_var_tensor_dimnamelist_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_var_out_tensor_tensor_dimnamelist_intt_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_var_out_tensor_tensor_dimnamelist_intt_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_out_tensor_tensor_dimnamelist_intt_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_var_tensor_dimnamelist_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_var_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_tensor_dimnamelist_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor_var_tensor_dimnamelist_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_Tensor_var_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { void* ret = _lantern_Tensor_var_tensor_dimnamelist_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_var_out_tensor_tensor_dimnamelist_scalar_bool)(void* out, void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_var_out_tensor_tensor_dimnamelist_scalar_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_out_tensor_tensor_dimnamelist_scalar_bool(out, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_var_mean_tensor_bool)(void* self, void* unbiased);
 HOST_API void* lantern_var_mean_tensor_bool(void* self, void* unbiased) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_tensor_bool(self, unbiased); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_var_mean_tensor_intarrayref_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_var_mean_tensor_intarrayref_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_tensor_intarrayref_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_var_mean_tensor_intarrayref_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_var_mean_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_tensor_intarrayref_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_var_mean_tensor_intarrayref_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_var_mean_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_tensor_intarrayref_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_var_mean_tensor_dimnamelist_bool_bool)(void* self, void* dim, void* unbiased, void* keepdim);
 HOST_API void* lantern_var_mean_tensor_dimnamelist_bool_bool(void* self, void* dim, void* unbiased, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_tensor_dimnamelist_bool_bool(self, dim, unbiased, keepdim); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_var_mean_tensor_dimnamelist_intt_bool)(void* self, void* dim, void* correction, void* keepdim);
-HOST_API void* lantern_var_mean_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_tensor_dimnamelist_intt_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_var_mean_tensor_dimnamelist_scalar_bool)(void* self, void* dim, void* correction, void* keepdim);
+HOST_API void* lantern_var_mean_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_tensor_dimnamelist_scalar_bool(self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; }
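
The `_intt_` to `_scalar_` renames across the `std`/`std_mean`/`var`/`var_mean` families track an upstream LibTorch change: the `correction` argument of these reductions is now an optional `Scalar` rather than an integer, so fractional corrections are representable. A hedged C++-level sketch (exact overload spellings vary between LibTorch releases):

    #include <ATen/ATen.h>
    int main() {
      at::Tensor x = at::randn({4, 5});
      // correction is now an optional Scalar; 1 reproduces the old unbiased
      // estimator, and non-integer corrections are now expressible.
      at::Tensor v  = at::var(x, /*dim=*/at::IntArrayRef{1}, at::Scalar(1),   /*keepdim=*/false);
      at::Tensor v2 = at::var(x, at::IntArrayRef{1},         at::Scalar(0.5), false);
    }
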
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_view_as_tensor_tensor)(void* self, void* other);
 HOST_API void* lantern_Tensor_view_as_tensor_tensor(void* self, void* other) { void* ret = _lantern_Tensor_view_as_tensor_tensor(self, other); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_where_tensor_tensor_tensor)(void* condition, void* self, void* other);
@@ -5432,6 +5580,14 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_native_norm_tensor_scalar(void* self, void* p) { LANTERN_CHECK_LOADED void* ret = _lantern_native_norm_tensor_scalar(self, p); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_native_norm_tensor_scalar_intarrayref_bool_scalartype)(void* self, void* p, void* dim, void* keepdim, void* dtype);
 HOST_API void* lantern_native_norm_tensor_scalar_intarrayref_bool_scalartype(void* self, void* p, void* dim, void* keepdim, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_native_norm_tensor_scalar_intarrayref_bool_scalartype(self, p, dim, keepdim, dtype); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__batch_norm_with_update_tensor_tensor_tensor_tensor_tensor_double_double)(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps);
+HOST_API void* lantern__batch_norm_with_update_tensor_tensor_tensor_tensor_tensor_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__batch_norm_with_update_tensor_tensor_tensor_tensor_tensor_double_double(input, weight, bias, running_mean, running_var, momentum, eps); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__batch_norm_with_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double)(void* out, void* save_mean, void* save_invstd, void* reserve, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps);
+HOST_API void* lantern__batch_norm_with_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(void* out, void* save_mean, void* save_invstd, void* reserve, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__batch_norm_with_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(out, save_mean, save_invstd, reserve, input, weight, bias, running_mean, running_var, momentum, eps); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__batch_norm_no_update_tensor_tensor_tensor_tensor_tensor_double_double)(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps);
+HOST_API void* lantern__batch_norm_no_update_tensor_tensor_tensor_tensor_tensor_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__batch_norm_no_update_tensor_tensor_tensor_tensor_tensor_double_double(input, weight, bias, running_mean, running_var, momentum, eps); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool_tensor)(void* grad_out, void* input, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_var, void* update, void* eps, void* output_mask, void* reserve);
+HOST_API void* lantern_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool_tensor(void* grad_out, void* input, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_var, void* update, void* eps, void* output_mask, void* reserve) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool_tensor(grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__sparse_sum_tensor)(void* self);
 HOST_API void* lantern__sparse_sum_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_sum_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__sparse_sum_tensor_scalartype)(void* self, void* dtype);
@@ -5598,6 +5754,12 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool(void* self, void* mat1, void* mat2, void* beta, void* alpha, void* use_gelu) { LANTERN_CHECK_LOADED void* ret = _lantern__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool(self, mat1, mat2, beta, alpha, use_gelu); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool)(void* self, void* mat1, void* mat2, void* beta, void* alpha, void* use_gelu);
 HOST_API void* lantern_Tensor__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool(void* self, void* mat1, void* mat2, void* beta, void* alpha, void* use_gelu) { void* ret = _lantern_Tensor__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool(self, mat1, mat2, beta, alpha, use_gelu); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__scaled_mm_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool)(void* self, void* mat2, void* scale_a, void* scale_b, void* bias, void* scale_result, void* out_dtype, void* use_fast_accum);
+HOST_API void* lantern__scaled_mm_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool(void* self, void* mat2, void* scale_a, void* scale_b, void* bias, void* scale_result, void* out_dtype, void* use_fast_accum) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_mm_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool(self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__scaled_mm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool)(void* out, void* self, void* mat2, void* scale_a, void* scale_b, void* bias, void* scale_result, void* out_dtype, void* use_fast_accum);
+HOST_API void* lantern__scaled_mm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool(void* out, void* self, void* mat2, void* scale_a, void* scale_b, void* bias, void* scale_result, void* out_dtype, void* use_fast_accum) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_mm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool(out, self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum); LANTERN_HOST_HANDLER return ret; }
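
Two groups of new bindings land here: `_batch_norm_with_update`/`_batch_norm_no_update`/`batch_norm_backward`, the consolidated batch-norm entry points newer LibTorch dispatches to, and `_scaled_mm`, the FP8 matrix multiply. A hedged sketch of the latter at the ATen level (argument order as in the wrappers above; the op is internal, requires compatible hardware, and its signature has shifted between LibTorch releases):

    #include <ATen/ATen.h>
    int main() {
      if (!at::hasCUDA()) return 0;
      auto opts = at::TensorOptions().device(at::kCUDA);
      // _scaled_mm expects a row-major FP8 lhs and a column-major FP8 rhs.
      at::Tensor a = at::randn({16, 32}, opts).to(at::kFloat8_e4m3fn);
      at::Tensor b = at::randn({16, 32}, opts).to(at::kFloat8_e4m3fn).t();
      at::Tensor scale_a = at::ones({}, opts);
      at::Tensor scale_b = at::ones({}, opts);
      at::Tensor out = at::_scaled_mm(a, b, scale_a, scale_b,
                                      /*bias=*/{}, /*scale_result=*/{},
                                      /*out_dtype=*/at::kBFloat16,
                                      /*use_fast_accum=*/false);
    }
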
+LANTERN_API void* (LANTERN_PTR _lantern__sparse_compressed_tensor_with_dims_intt_intt_intarrayref_intarrayref_scalartype_tensoroptions)(void* nnz, void* dense_dim, void* size, void* blocksize, void* index_dtype, void* options);
+HOST_API void* lantern__sparse_compressed_tensor_with_dims_intt_intt_intarrayref_intarrayref_scalartype_tensoroptions(void* nnz, void* dense_dim, void* size, void* blocksize, void* index_dtype, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_compressed_tensor_with_dims_intt_intt_intarrayref_intarrayref_scalartype_tensoroptions(nnz, dense_dim, size, blocksize, index_dtype, options); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_sparse_compressed_tensor_tensor_tensor_tensor_intarrayref_tensoroptions)(void* compressed_indices, void* plain_indices, void* values, void* size, void* options);
 HOST_API void* lantern_sparse_compressed_tensor_tensor_tensor_tensor_intarrayref_tensoroptions(void* compressed_indices, void* plain_indices, void* values, void* size, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_compressed_tensor_tensor_tensor_tensor_intarrayref_tensoroptions(compressed_indices, plain_indices, values, size, options); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_sparse_csr_tensor_tensor_tensor_tensor_intarrayref_tensoroptions)(void* crow_indices, void* col_indices, void* values, void* size, void* options);
@@ -5630,14 +5792,14 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern__sparse_bsc_tensor_unsafe_tensor_tensor_tensor_intarrayref_tensoroptions(void* ccol_indices, void* row_indices, void* values, void* size, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_bsc_tensor_unsafe_tensor_tensor_tensor_intarrayref_tensoroptions(ccol_indices, row_indices, values, size, options); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_sparse_coo_tensor_intarrayref_tensoroptions)(void* size, void* options);
 HOST_API void* lantern_sparse_coo_tensor_intarrayref_tensoroptions(void* size, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_coo_tensor_intarrayref_tensoroptions(size, options); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_sparse_coo_tensor_tensor_tensor_tensoroptions)(void* indices, void* values, void* options);
-HOST_API void* lantern_sparse_coo_tensor_tensor_tensor_tensoroptions(void* indices, void* values, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_coo_tensor_tensor_tensor_tensoroptions(indices, values, options); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions)(void* indices, void* values, void* size, void* options);
-HOST_API void* lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions(void* indices, void* values, void* size, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions(indices, values, size, options); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions)(void* indices, void* values, void* size, void* options);
-HOST_API void* lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions(void* indices, void* values, void* size, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions(indices, values, size, options); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref)(void* indices, void* values, void* size);
-HOST_API void* lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref(void* indices, void* values, void* size) { LANTERN_CHECK_LOADED void* ret = _lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref(indices, values, size); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_sparse_coo_tensor_tensor_tensor_tensoroptions_bool)(void* indices, void* values, void* options, void* is_coalesced);
+HOST_API void* lantern_sparse_coo_tensor_tensor_tensor_tensoroptions_bool(void* indices, void* values, void* options, void* is_coalesced) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_coo_tensor_tensor_tensor_tensoroptions_bool(indices, values, options, is_coalesced); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions_bool)(void* indices, void* values, void* size, void* options, void* is_coalesced);
+HOST_API void* lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions_bool(void* indices, void* values, void* size, void* options, void* is_coalesced) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions_bool(indices, values, size, options, is_coalesced); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions_bool)(void* indices, void* values, void* size, void* options, void* is_coalesced);
+HOST_API void* lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions_bool(void* indices, void* values, void* size, void* options, void* is_coalesced) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions_bool(indices, values, size, options, is_coalesced); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref_bool)(void* indices, void* values, void* size, void* is_coalesced);
+HOST_API void* lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref_bool(void* indices, void* values, void* size, void* is_coalesced) { LANTERN_CHECK_LOADED void* ret = _lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref_bool(indices, values, size, is_coalesced); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__validate_sparse_compressed_tensor_args_tensor_tensor_tensor_intarrayref_layout)(void* compressed_indices, void* plain_indices, void* values, void* size, void* layout);
 HOST_API void* lantern__validate_sparse_compressed_tensor_args_tensor_tensor_tensor_intarrayref_layout(void* compressed_indices, void* plain_indices, void* values, void* size, void* layout) { LANTERN_CHECK_LOADED void* ret = _lantern__validate_sparse_compressed_tensor_args_tensor_tensor_tensor_intarrayref_layout(compressed_indices, plain_indices, values, size, layout); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__validate_sparse_csr_tensor_args_tensor_tensor_tensor_intarrayref)(void* crow_indices, void* col_indices, void* values, void* size);
@@ -5650,22 +5812,24 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern__validate_sparse_bsc_tensor_args_tensor_tensor_tensor_intarrayref(void* ccol_indices, void* row_indices, void* values, void* size) { LANTERN_CHECK_LOADED void* ret = _lantern__validate_sparse_bsc_tensor_args_tensor_tensor_tensor_intarrayref(ccol_indices, row_indices, values, size); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_with_dims_intt_intt_intarrayref_tensoroptions)(void* sparse_dim, void* dense_dim, void* size, void* options);
 HOST_API void* lantern__sparse_coo_tensor_with_dims_intt_intt_intarrayref_tensoroptions(void* sparse_dim, void* dense_dim, void* size, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_with_dims_intt_intt_intarrayref_tensoroptions(sparse_dim, dense_dim, size, options); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions)(void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* options);
-HOST_API void* lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions(void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* options) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions(sparse_dim, dense_dim, size, indices, values, options); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions_bool)(void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* options, void* is_coalesced);
+HOST_API void* lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions_bool(void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* options, void* is_coalesced) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions_bool(sparse_dim, dense_dim, size, indices, values, options, is_coalesced); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_sparse_resize__tensor_intarrayref_intt_intt)(void* self, void* size, void* sparse_dim, void* dense_dim);
 HOST_API void* lantern_Tensor_sparse_resize__tensor_intarrayref_intt_intt(void* self, void* size, void* sparse_dim, void* dense_dim) { void* ret = _lantern_Tensor_sparse_resize__tensor_intarrayref_intt_intt(self, size, sparse_dim, dense_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_sparse_resize_and_clear__tensor_intarrayref_intt_intt)(void* self, void* size, void* sparse_dim, void* dense_dim);
 HOST_API void* lantern_Tensor_sparse_resize_and_clear__tensor_intarrayref_intt_intt(void* self, void* size, void* sparse_dim, void* dense_dim) { void* ret = _lantern_Tensor_sparse_resize_and_clear__tensor_intarrayref_intt_intt(self, size, sparse_dim, dense_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_sparse_mask_tensor_tensor)(void* self, void* mask);
 HOST_API void* lantern_Tensor_sparse_mask_tensor_tensor(void* self, void* mask) { void* ret = _lantern_Tensor_sparse_mask_tensor_tensor(self, mask); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__sparse_mask_projection_tensor_tensor_bool)(void* self, void* mask, void* accumulate_matches);
+HOST_API void* lantern_Tensor__sparse_mask_projection_tensor_tensor_bool(void* self, void* mask, void* accumulate_matches) { void* ret = _lantern_Tensor__sparse_mask_projection_tensor_tensor_bool(self, mask, accumulate_matches); LANTERN_HOST_HANDLER return ret; }
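
The sparse COO constructors (and their `_unsafe`/`_validate_args`/`_with_dims_and_tensors` relatives) gain an `is_coalesced` argument, so a caller that already has sorted, deduplicated indices can assert as much and let LibTorch skip a coalesce pass. A hedged sketch with the C++ frontend (factory arity per the wrappers above; exact default spellings may differ by release):

    #include <torch/torch.h>
    int main() {
      // indices are 2 x nnz, already sorted and unique -> safe to assert coalesced
      torch::Tensor indices = torch::tensor({{0, 1}, {0, 1}});
      torch::Tensor values  = torch::tensor({1.0, 2.0});
      torch::Tensor s = torch::sparse_coo_tensor(
          indices, values, {2, 2},
          torch::TensorOptions().dtype(torch::kDouble),
          /*is_coalesced=*/true);
    }
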
 LANTERN_API void* (LANTERN_PTR _lantern__to_cpu_tensorlist)(void* tensors);
 HOST_API void* lantern__to_cpu_tensorlist(void* tensors) { LANTERN_CHECK_LOADED void* ret = _lantern__to_cpu_tensorlist(tensors); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_dense_tensor_scalartype)(void* self, void* dtype);
-HOST_API void* lantern_Tensor_to_dense_tensor_scalartype(void* self, void* dtype) { void* ret = _lantern_Tensor_to_dense_tensor_scalartype(self, dtype); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_dense_tensor_scalartype)(void* self, void* dtype);
-HOST_API void* lantern_Tensor__to_dense_tensor_scalartype(void* self, void* dtype) { void* ret = _lantern_Tensor__to_dense_tensor_scalartype(self, dtype); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_to_dense_backward_tensor_tensor)(void* grad, void* input);
-HOST_API void* lantern_to_dense_backward_tensor_tensor(void* grad, void* input) { LANTERN_CHECK_LOADED void* ret = _lantern_to_dense_backward_tensor_tensor(grad, input); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_dense_tensor_scalartype_bool)(void* self, void* dtype, void* masked_grad);
+HOST_API void* lantern_Tensor_to_dense_tensor_scalartype_bool(void* self, void* dtype, void* masked_grad) { void* ret = _lantern_Tensor_to_dense_tensor_scalartype_bool(self, dtype, masked_grad); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_dense_tensor_scalartype_bool)(void* self, void* dtype, void* masked_grad);
+HOST_API void* lantern_Tensor__to_dense_tensor_scalartype_bool(void* self, void* dtype, void* masked_grad) { void* ret = _lantern_Tensor__to_dense_tensor_scalartype_bool(self, dtype, masked_grad); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_to_dense_backward_tensor_tensor_bool)(void* grad, void* input, void* masked_grad);
+HOST_API void* lantern_to_dense_backward_tensor_tensor_bool(void* grad, void* input, void* masked_grad) { LANTERN_CHECK_LOADED void* ret = _lantern_to_dense_backward_tensor_tensor_bool(grad, input, masked_grad); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_sparse_dim_tensor)(void* self);
 HOST_API void* lantern_Tensor_sparse_dim_tensor(void* self) { void* ret = _lantern_Tensor_sparse_dim_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor__dimi_tensor)(void* self);
@@ -5716,22 +5880,36 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_Tensor_unbind_tensor_dimname(void* self, void* dim) { void* ret = _lantern_Tensor_unbind_tensor_dimname(self, dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_sparse_tensor_intt)(void* self, void* sparse_dim);
 HOST_API void* lantern_Tensor_to_sparse_tensor_intt(void* self, void* sparse_dim) { void* ret = _lantern_Tensor_to_sparse_tensor_intt(self, sparse_dim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_sparse_tensor_intt)(void* self, void* sparse_dim);
+HOST_API void* lantern_Tensor__to_sparse_tensor_intt(void* self, void* sparse_dim) { void* ret = _lantern_Tensor__to_sparse_tensor_intt(self, sparse_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_sparse_tensor_layout_intarrayref_intt)(void* self, void* layout, void* blocksize, void* dense_dim);
 HOST_API void* lantern_Tensor_to_sparse_tensor_layout_intarrayref_intt(void* self, void* layout, void* blocksize, void* dense_dim) { void* ret = _lantern_Tensor_to_sparse_tensor_layout_intarrayref_intt(self, layout, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_sparse_tensor_layout_intarrayref_intt)(void* self, void* layout, void* blocksize, void* dense_dim);
+HOST_API void* lantern_Tensor__to_sparse_tensor_layout_intarrayref_intt(void* self, void* layout, void* blocksize, void* dense_dim) { void* ret = _lantern_Tensor__to_sparse_tensor_layout_intarrayref_intt(self, layout, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_sparse_csr_tensor_intt)(void* self, void* dense_dim);
 HOST_API void* lantern_Tensor_to_sparse_csr_tensor_intt(void* self, void* dense_dim) { void* ret = _lantern_Tensor_to_sparse_csr_tensor_intt(self, dense_dim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_sparse_csr_tensor_intt)(void* self, void* dense_dim);
+HOST_API void* lantern_Tensor__to_sparse_csr_tensor_intt(void* self, void* dense_dim) { void* ret = _lantern_Tensor__to_sparse_csr_tensor_intt(self, dense_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_sparse_csc_tensor_intt)(void* self, void* dense_dim);
 HOST_API void* lantern_Tensor_to_sparse_csc_tensor_intt(void* self, void* dense_dim) { void* ret = _lantern_Tensor_to_sparse_csc_tensor_intt(self, dense_dim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_sparse_csc_tensor_intt)(void* self, void* dense_dim);
+HOST_API void* lantern_Tensor__to_sparse_csc_tensor_intt(void* self, void* dense_dim) { void* ret = _lantern_Tensor__to_sparse_csc_tensor_intt(self, dense_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_sparse_bsr_tensor_intarrayref_intt)(void* self, void* blocksize, void* dense_dim);
 HOST_API void* lantern_Tensor_to_sparse_bsr_tensor_intarrayref_intt(void* self, void* blocksize, void* dense_dim) { void* ret = _lantern_Tensor_to_sparse_bsr_tensor_intarrayref_intt(self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_sparse_bsr_tensor_intarrayref_intt)(void* self, void* blocksize, void* dense_dim);
+HOST_API void* lantern_Tensor__to_sparse_bsr_tensor_intarrayref_intt(void* self, void* blocksize, void* dense_dim) { void* ret = _lantern_Tensor__to_sparse_bsr_tensor_intarrayref_intt(self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_sparse_bsc_tensor_intarrayref_intt)(void* self, void* blocksize, void* dense_dim);
 HOST_API void* lantern_Tensor_to_sparse_bsc_tensor_intarrayref_intt(void* self, void* blocksize, void* dense_dim) { void* ret = _lantern_Tensor_to_sparse_bsc_tensor_intarrayref_intt(self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor__to_sparse_bsc_tensor_intarrayref_intt)(void* self, void* blocksize, void* dense_dim);
+HOST_API void* lantern_Tensor__to_sparse_bsc_tensor_intarrayref_intt(void* self, void* blocksize, void* dense_dim) { void* ret = _lantern_Tensor__to_sparse_bsc_tensor_intarrayref_intt(self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__to_sparse_semi_structured_tensor)(void* dense);
+HOST_API void* lantern__to_sparse_semi_structured_tensor(void* dense) { LANTERN_CHECK_LOADED void* ret = _lantern__to_sparse_semi_structured_tensor(dense); LANTERN_HOST_HANDLER return ret; }
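
Three related conversion changes appear here: `to_dense`/`_to_dense`/`to_dense_backward` gain a `masked_grad` flag (whether the backward of `to_dense` is masked to the input's sparsity pattern), each `to_sparse*` conversion gains an underscore-prefixed internal twin so autograd can dispatch separately, and `_to_sparse_semi_structured` is added for NVIDIA's 2:4 semi-structured format. A hedged method-level sketch (default spellings may differ across LibTorch versions):

    #include <torch/torch.h>
    int main() {
      torch::Tensor d  = torch::randn({4, 4});
      torch::Tensor sp = d.to_sparse();  // COO layout
      // Pass masked_grad explicitly to pin the backward behaviour.
      torch::Tensor back = sp.to_dense(/*dtype=*/c10::nullopt, /*masked_grad=*/true);
    }
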
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_mkldnn_tensor_scalartype)(void* self, void* dtype);
 HOST_API void* lantern_Tensor_to_mkldnn_tensor_scalartype(void* self, void* dtype) { void* ret = _lantern_Tensor_to_mkldnn_tensor_scalartype(self, dtype); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_mkldnn_reorder_conv2d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref)(void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size);
 HOST_API void* lantern_mkldnn_reorder_conv2d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_reorder_conv2d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(self, padding, stride, dilation, groups, input_size); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt)(void* self, void* padding, void* stride, void* dilation, void* groups);
-HOST_API void* lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt(void* self, void* padding, void* stride, void* dilation, void* groups) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt(self, padding, stride, dilation, groups); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref)(void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size);
+HOST_API void* lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(self, padding, stride, dilation, groups, input_size); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_to_mkldnn_backward_tensor_tensor)(void* grad, void* input);
 HOST_API void* lantern_to_mkldnn_backward_tensor_tensor(void* grad, void* input) { LANTERN_CHECK_LOADED void* ret = _lantern_to_mkldnn_backward_tensor_tensor(grad, input); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_quantize_per_tensor_dynamic_tensor_scalartype_bool)(void* self, void* dtype, void* reduce_range);
@@ -5846,8 +6024,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_result_type_scalar_tensor(void* scalar, void* tensor) { LANTERN_CHECK_LOADED void* ret = _lantern_result_type_scalar_tensor(scalar, tensor); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_result_type_scalar_scalar)(void* scalar1, void* scalar2);
 HOST_API void* lantern_result_type_scalar_scalar(void* scalar1, void* scalar2) { LANTERN_CHECK_LOADED void* ret = _lantern_result_type_scalar_scalar(scalar1, scalar2); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern_can_cast_scalartype_scalartype)(void* from, void* to);
-HOST_API void* lantern_can_cast_scalartype_scalartype(void* from, void* to) { LANTERN_CHECK_LOADED void* ret = _lantern_can_cast_scalartype_scalartype(from, to); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_can_cast_scalartype_scalartype)(void* from_, void* to);
+HOST_API void* lantern_can_cast_scalartype_scalartype(void* from_, void* to) { LANTERN_CHECK_LOADED void* ret = _lantern_can_cast_scalartype_scalartype(from_, to); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_promote_types_scalartype_scalartype)(void* type1, void* type2);
 HOST_API void* lantern_promote_types_scalartype_scalartype(void* type1, void* type2) { LANTERN_CHECK_LOADED void* ret = _lantern_promote_types_scalartype_scalartype(type1, type2); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__local_scalar_dense_tensor)(void* self);
@@ -5944,6 +6122,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_masked_scatter_tensor_tensor_tensor(void* self, void* mask, void* source) { LANTERN_CHECK_LOADED void* ret = _lantern_masked_scatter_tensor_tensor_tensor(self, mask, source); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_masked_scatter_tensor_tensor_tensor)(void* self, void* mask, void* source);
 HOST_API void* lantern_Tensor_masked_scatter_tensor_tensor_tensor(void* self, void* mask, void* source) { void* ret = _lantern_Tensor_masked_scatter_tensor_tensor_tensor(self, mask, source); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_masked_scatter_backward_tensor_tensor_intarrayref)(void* grad_output, void* mask, void* sizes);
+HOST_API void* lantern_masked_scatter_backward_tensor_tensor_intarrayref(void* grad_output, void* mask, void* sizes) { LANTERN_CHECK_LOADED void* ret = _lantern_masked_scatter_backward_tensor_tensor_intarrayref(grad_output, mask, sizes); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__masked_softmax_tensor_tensor_intt_intt)(void* self, void* mask, void* dim, void* mask_type);
 HOST_API void* lantern__masked_softmax_tensor_tensor_intt_intt(void* self, void* mask, void* dim, void* mask_type) { LANTERN_CHECK_LOADED void* ret = _lantern__masked_softmax_tensor_tensor_intt_intt(self, mask, dim, mask_type); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__masked_softmax_backward_tensor_tensor_tensor_intt)(void* grad_output, void* output, void* mask, void* dim);
@@ -6496,6 +6676,12 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_nonzero_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_nonzero_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_nonzero_tensor)(void* self);
 HOST_API void* lantern_Tensor_nonzero_tensor(void* self) { void* ret = _lantern_Tensor_nonzero_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_nonzero_static_out_tensor_tensor_intt_intt)(void* out, void* self, void* size, void* fill_value);
+HOST_API void* lantern_nonzero_static_out_tensor_tensor_intt_intt(void* out, void* self, void* size, void* fill_value) { LANTERN_CHECK_LOADED void* ret = _lantern_nonzero_static_out_tensor_tensor_intt_intt(out, self, size, fill_value); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_nonzero_static_tensor_intt_intt)(void* self, void* size, void* fill_value);
+HOST_API void* lantern_nonzero_static_tensor_intt_intt(void* self, void* size, void* fill_value) { LANTERN_CHECK_LOADED void* ret = _lantern_nonzero_static_tensor_intt_intt(self, size, fill_value); LANTERN_HOST_HANDLER return ret; }
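
`nonzero_static` is a shape-stable variant of `nonzero`: the result always has exactly `size` rows, padded with `fill_value` when there are fewer nonzero elements, which keeps output shapes static for compilers and exporters. A hedged sketch (backend coverage for this op varies by release; the CPU implementation landed first):

    #include <torch/torch.h>
    int main() {
      torch::Tensor x   = torch::tensor({0, 3, 0, 7});
      torch::Tensor idx = torch::nonzero_static(x, /*size=*/3, /*fill_value=*/-1);
      // idx is 3 x 1: rows {1}, {3}, then one padded row {-1}
    }
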
+LANTERN_API void* (LANTERN_PTR _lantern_Tensor_nonzero_static_tensor_intt_intt)(void* self, void* size, void* fill_value);
+HOST_API void* lantern_Tensor_nonzero_static_tensor_intt_intt(void* self, void* size, void* fill_value) { void* ret = _lantern_Tensor_nonzero_static_tensor_intt_intt(self, size, fill_value); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_nonzero_numpy_tensor)(void* self);
 HOST_API void* lantern_nonzero_numpy_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_nonzero_numpy_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_nonzero_numpy_tensor)(void* self);
@@ -6814,6 +7000,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_min_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_min_tensor(self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_min_tensor)(void* self);
 HOST_API void* lantern_Tensor_min_tensor(void* self) { void* ret = _lantern_Tensor_min_tensor(self); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_min_out_tensor_tensor)(void* out, void* self);
+HOST_API void* lantern_min_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_min_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_fmin_tensor_tensor)(void* self, void* other);
 HOST_API void* lantern_fmin_tensor_tensor(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern_fmin_tensor_tensor(self, other); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_fmin_tensor_tensor)(void* self, void* other);
@@ -6918,6 +7106,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern_argsort_tensor_bool_intt_bool(void* self, void* stable, void* dim, void* descending) { LANTERN_CHECK_LOADED void* ret = _lantern_argsort_tensor_bool_intt_bool(self, stable, dim, descending); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_argsort_tensor_bool_intt_bool)(void* self, void* stable, void* dim, void* descending);
 HOST_API void* lantern_Tensor_argsort_tensor_bool_intt_bool(void* self, void* stable, void* dim, void* descending) { void* ret = _lantern_Tensor_argsort_tensor_bool_intt_bool(self, stable, dim, descending); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern_argsort_out_tensor_tensor_bool_intt_bool)(void* out, void* self, void* stable, void* dim, void* descending);
+HOST_API void* lantern_argsort_out_tensor_tensor_bool_intt_bool(void* out, void* self, void* stable, void* dim, void* descending) { LANTERN_CHECK_LOADED void* ret = _lantern_argsort_out_tensor_tensor_bool_intt_bool(out, self, stable, dim, descending); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_argsort_tensor_dimname_bool)(void* self, void* dim, void* descending);
 HOST_API void* lantern_argsort_tensor_dimname_bool(void* self, void* dim, void* descending) { LANTERN_CHECK_LOADED void* ret = _lantern_argsort_tensor_dimname_bool(self, dim, descending); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern_Tensor_argsort_tensor_dimname_bool)(void* self, void* dim, void* descending);
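
These hunks also add previously missing `out=` overloads, `min.unary_out` for the full reduction and `argsort.stable_out`, so callers can write into preallocated storage. A hedged sketch (out-variant C++ names follow ATen's usual `*_out` convention):

    #include <torch/torch.h>
    int main() {
      torch::Tensor x = torch::tensor({3.0f, 1.0f, 2.0f});
      torch::Tensor m = torch::empty({}, x.options());
      at::min_out(m, x);  // full-reduction min written into m
      torch::Tensor idx = torch::empty({3}, x.options().dtype(torch::kLong));
      at::argsort_out(idx, x, /*stable=*/true, /*dim=*/0, /*descending=*/false);
    }
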
@@ -7020,108 +7210,134 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
 HOST_API void* lantern__foreach_add_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__foreach_add__tensorlist_scalar)(void* self, void* scalar);
 HOST_API void* lantern__foreach_add__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_tensorlist_scalar)(void* self, void* scalar);
-HOST_API void* lantern__foreach_sub_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub__tensorlist_scalar)(void* self, void* scalar);
-HOST_API void* lantern__foreach_sub__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_tensorlist_scalar)(void* self, void* scalar);
-HOST_API void* lantern__foreach_mul_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul__tensorlist_scalar)(void* self, void* scalar);
-HOST_API void* lantern__foreach_mul__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_tensorlist_scalar)(void* self, void* scalar);
-HOST_API void* lantern__foreach_div_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__foreach_div__tensorlist_scalar)(void* self, void* scalar);
-HOST_API void* lantern__foreach_div__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_tensorlist_scalar)(void* self, void* scalar);
-HOST_API void* lantern__foreach_clamp_min_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min__tensorlist_scalar)(void* self, void* scalar);
-HOST_API void* lantern__foreach_clamp_min__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_tensorlist_scalar)(void* self, void* scalar);
-HOST_API void* lantern__foreach_clamp_max_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max__tensorlist_scalar)(void* self, void* scalar);
-HOST_API void* lantern__foreach_clamp_max__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_tensorlist_scalar)(void* self, void* scalar);
-HOST_API void* lantern__foreach_maximum_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum__tensorlist_scalar)(void* self, void* scalar);
-HOST_API void* lantern__foreach_maximum__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_tensorlist_scalar)(void* self, void* scalar);
-HOST_API void* lantern__foreach_minimum_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
-LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum__tensorlist_scalar)(void* self, void* scalar);
-HOST_API void* lantern__foreach_minimum__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_tensorlist_tensorlist_scalar)(void* self, void* other, void* alpha);
 HOST_API void* lantern__foreach_add_tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_tensorlist_tensorlist_scalar(self, other, alpha); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__foreach_add__tensorlist_tensorlist_scalar)(void* self, void* other, void* alpha);
 HOST_API void* lantern__foreach_add__tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add__tensorlist_tensorlist_scalar(self, other, alpha); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_tensorlist_arrayrefscalar)(void* self, void* scalars);
+HOST_API void* lantern__foreach_add_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__foreach_add__tensorlist_arrayrefscalar)(void* self, void* scalars);
+HOST_API void* lantern__foreach_add__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_tensorlist_tensor_scalar)(void* self, void* other, void* alpha);
+HOST_API void* lantern__foreach_add_tensorlist_tensor_scalar(void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_tensorlist_tensor_scalar(self, other, alpha); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__foreach_add__tensorlist_tensor_scalar)(void* self, void* other, void* alpha);
+HOST_API void* lantern__foreach_add__tensorlist_tensor_scalar(void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add__tensorlist_tensor_scalar(self, other, alpha); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_tensorlist_scalar)(void* self, void* scalar);
+HOST_API void* lantern__foreach_sub_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub__tensorlist_scalar)(void* self, void* scalar);
+HOST_API void* lantern__foreach_sub__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_tensorlist_tensorlist_scalar)(void* self, void* other, void* alpha);
 HOST_API void* lantern__foreach_sub_tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_tensorlist_tensorlist_scalar(self, other, alpha); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub__tensorlist_tensorlist_scalar)(void* self, void* other, void* alpha);
 HOST_API void* lantern__foreach_sub__tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub__tensorlist_tensorlist_scalar(self, other, alpha); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_tensorlist_arrayrefscalar)(void* self, void* scalars);
+HOST_API void* lantern__foreach_sub_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub__tensorlist_arrayrefscalar)(void* self, void* scalars);
+HOST_API void* lantern__foreach_sub__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_tensorlist_scalar)(void* self, void* scalar);
+HOST_API void* lantern__foreach_mul_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul__tensorlist_scalar)(void* self, void* scalar);
+HOST_API void* lantern__foreach_mul__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_tensorlist_tensorlist)(void* self, void* other);
 HOST_API void* lantern__foreach_mul_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; }
 LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul__tensorlist_tensorlist)(void* self, void* other);
 HOST_API void* lantern__foreach_mul__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; }
+LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_tensorlist_arrayrefscalar)(void* self, void* scalars);
+HOST_API void*
lantern__foreach_mul_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul__tensorlist_arrayrefscalar)(void* self, void* scalars); + HOST_API void* lantern__foreach_mul__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_tensorlist_tensor)(void* self, void* other); + HOST_API void* lantern__foreach_mul_tensorlist_tensor(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_tensorlist_tensor(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul__tensorlist_tensor)(void* self, void* other); + HOST_API void* lantern__foreach_mul__tensorlist_tensor(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul__tensorlist_tensor(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_div_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_div__tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_div__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_tensorlist_tensorlist)(void* self, void* other); HOST_API void* lantern__foreach_div_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_div__tensorlist_tensorlist)(void* self, void* other); HOST_API void* lantern__foreach_div__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_tensorlist_tensorlist)(void* self, void* other); - HOST_API void* lantern__foreach_clamp_min_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min__tensorlist_tensorlist)(void* self, void* other); - HOST_API void* lantern__foreach_clamp_min__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_tensorlist_tensorlist)(void* self, void* other); - HOST_API void* lantern__foreach_clamp_max_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max__tensorlist_tensorlist)(void* self, void* other); - HOST_API void* 
lantern__foreach_clamp_max__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_tensorlist_tensorlist)(void* self, void* other); - HOST_API void* lantern__foreach_maximum_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum__tensorlist_tensorlist)(void* self, void* other); - HOST_API void* lantern__foreach_maximum__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_tensorlist_tensorlist)(void* self, void* other); - HOST_API void* lantern__foreach_minimum_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum__tensorlist_tensorlist)(void* self, void* other); - HOST_API void* lantern__foreach_minimum__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_add_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_add__tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_add__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_sub_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub__tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_sub__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_div_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_div__tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_div__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - 
LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_mul_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul__tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_mul__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_clamp_min_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min__tensorlist_arrayrefscalar)(void* self, void* scalars); - HOST_API void* lantern__foreach_clamp_min__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_tensorlist_tensor)(void* self, void* other); + HOST_API void* lantern__foreach_div_tensorlist_tensor(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_tensorlist_tensor(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_div__tensorlist_tensor)(void* self, void* other); + HOST_API void* lantern__foreach_div__tensorlist_tensor(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div__tensorlist_tensor(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_clamp_max_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max__tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_clamp_max__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_clamp_max_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max__tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_clamp_max__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_clamp_max_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED 
void* ret = _lantern__foreach_clamp_max_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max__tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_clamp_max__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_clamp_min_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min__tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_clamp_min__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_clamp_min_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min__tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_clamp_min__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_tensorlist_arrayrefscalar)(void* self, void* scalars); + HOST_API void* lantern__foreach_clamp_min_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min__tensorlist_arrayrefscalar)(void* self, void* scalars); + HOST_API void* lantern__foreach_clamp_min__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_maximum_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum__tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_maximum__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_maximum_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR 
_lantern__foreach_maximum__tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_maximum__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_maximum_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum__tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_maximum__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_minimum_tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum__tensorlist_scalar)(void* self, void* scalar); + HOST_API void* lantern__foreach_minimum__tensorlist_scalar(void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum__tensorlist_scalar(self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_minimum_tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum__tensorlist_tensorlist)(void* self, void* other); + HOST_API void* lantern__foreach_minimum__tensorlist_tensorlist(void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum__tensorlist_tensorlist(self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_minimum_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum__tensorlist_arrayrefscalar)(void* self, void* scalars); HOST_API void* lantern__foreach_minimum__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum__tensorlist_arrayrefscalar(self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_exp_tensorlist)(void* self); - HOST_API void* lantern__foreach_exp_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_exp_tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_zero__tensorlist)(void* self); - HOST_API void* lantern__foreach_zero__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_zero__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR 
_lantern__foreach_exp__tensorlist)(void* self); - HOST_API void* lantern__foreach_exp__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_exp__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sqrt_tensorlist)(void* self); - HOST_API void* lantern__foreach_sqrt_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sqrt_tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sqrt__tensorlist)(void* self); - HOST_API void* lantern__foreach_sqrt__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sqrt__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value); + HOST_API void* lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value); + HOST_API void* lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); 
LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value); + HOST_API void* lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value); + HOST_API void* lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars); + HOST_API void* lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_abs_tensorlist)(void* self); HOST_API void* lantern__foreach_abs_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_abs_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_abs__tensorlist)(void* self); @@ -7158,6 +7374,10 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__foreach_erfc_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_erfc_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_erfc__tensorlist)(void* 
self); HOST_API void* lantern__foreach_erfc__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_erfc__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_exp_tensorlist)(void* self); + HOST_API void* lantern__foreach_exp_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_exp_tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_exp__tensorlist)(void* self); + HOST_API void* lantern__foreach_exp__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_exp__tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_expm1_tensorlist)(void* self); HOST_API void* lantern__foreach_expm1_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_expm1_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_expm1__tensorlist)(void* self); @@ -7166,6 +7386,22 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__foreach_floor_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_floor_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_floor__tensorlist)(void* self); HOST_API void* lantern__foreach_floor__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_floor__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_frac_tensorlist)(void* self); + HOST_API void* lantern__foreach_frac_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_frac_tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_frac__tensorlist)(void* self); + HOST_API void* lantern__foreach_frac__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_frac__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_tensorlist_tensorlist_tensorlist)(void* self, void* tensors1, void* weights); + HOST_API void* lantern__foreach_lerp_tensorlist_tensorlist_tensorlist(void* self, void* tensors1, void* weights) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_tensorlist_tensorlist_tensorlist(self, tensors1, weights); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp__tensorlist_tensorlist_tensorlist)(void* self, void* tensors1, void* weights); + HOST_API void* lantern__foreach_lerp__tensorlist_tensorlist_tensorlist(void* self, void* tensors1, void* weights) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp__tensorlist_tensorlist_tensorlist(self, tensors1, weights); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_tensorlist_tensorlist_scalar)(void* self, void* tensors1, void* weight); + HOST_API void* lantern__foreach_lerp_tensorlist_tensorlist_scalar(void* self, void* tensors1, void* weight) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_tensorlist_tensorlist_scalar(self, tensors1, weight); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp__tensorlist_tensorlist_scalar)(void* self, void* tensors1, void* weight); + HOST_API void* lantern__foreach_lerp__tensorlist_tensorlist_scalar(void* self, void* tensors1, void* weight) { LANTERN_CHECK_LOADED void* ret = 
_lantern__foreach_lerp__tensorlist_tensorlist_scalar(self, tensors1, weight); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_lgamma_tensorlist)(void* self); + HOST_API void* lantern__foreach_lgamma_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lgamma_tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_lgamma__tensorlist)(void* self); + HOST_API void* lantern__foreach_lgamma__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lgamma__tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_log_tensorlist)(void* self); HOST_API void* lantern__foreach_log_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_log_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_log__tensorlist)(void* self); @@ -7182,84 +7418,74 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__foreach_log2_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_log2_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_log2__tensorlist)(void* self); HOST_API void* lantern__foreach_log2__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_log2__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_max_tensorlist)(void* self); + HOST_API void* lantern__foreach_max_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_max_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_neg_tensorlist)(void* self); HOST_API void* lantern__foreach_neg_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_neg_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_neg__tensorlist)(void* self); HOST_API void* lantern__foreach_neg__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_neg__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_tan_tensorlist)(void* self); - HOST_API void* lantern__foreach_tan_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tan_tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_tan__tensorlist)(void* self); - HOST_API void* lantern__foreach_tan__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tan__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_tanh_tensorlist)(void* self); - HOST_API void* lantern__foreach_tanh_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tanh_tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_tanh__tensorlist)(void* self); - HOST_API void* lantern__foreach_tanh__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tanh__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sin_tensorlist)(void* self); - HOST_API void* lantern__foreach_sin_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sin_tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR 
_lantern__foreach_sin__tensorlist)(void* self); - HOST_API void* lantern__foreach_sin__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sin__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sinh_tensorlist)(void* self); - HOST_API void* lantern__foreach_sinh_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sinh_tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sinh__tensorlist)(void* self); - HOST_API void* lantern__foreach_sinh__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sinh__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_round_tensorlist)(void* self); - HOST_API void* lantern__foreach_round_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_round_tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_round__tensorlist)(void* self); - HOST_API void* lantern__foreach_round__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_round__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_lgamma_tensorlist)(void* self); - HOST_API void* lantern__foreach_lgamma_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lgamma_tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_lgamma__tensorlist)(void* self); - HOST_API void* lantern__foreach_lgamma__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lgamma__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_frac_tensorlist)(void* self); - HOST_API void* lantern__foreach_frac_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_frac_tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_frac__tensorlist)(void* self); - HOST_API void* lantern__foreach_frac__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_frac__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_norm_tensorlist_scalar_scalartype)(void* self, void* ord, void* dtype); + HOST_API void* lantern__foreach_norm_tensorlist_scalar_scalartype(void* self, void* ord, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_norm_tensorlist_scalar_scalartype(self, ord, dtype); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow_tensorlist_tensorlist)(void* self, void* exponent); + HOST_API void* lantern__foreach_pow_tensorlist_tensorlist(void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow_tensorlist_tensorlist(self, exponent); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow_tensorlist_scalar)(void* self, void* exponent); + HOST_API void* lantern__foreach_pow_tensorlist_scalar(void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow_tensorlist_scalar(self, exponent); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow_tensorlist_arrayrefscalar)(void* self, void* exponent); + HOST_API void* lantern__foreach_pow_tensorlist_arrayrefscalar(void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = 
_lantern__foreach_pow_tensorlist_arrayrefscalar(self, exponent); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow_scalar_tensorlist)(void* self, void* exponent); + HOST_API void* lantern__foreach_pow_scalar_tensorlist(void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow_scalar_tensorlist(self, exponent); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow__tensorlist_tensorlist)(void* self, void* exponent); + HOST_API void* lantern__foreach_pow__tensorlist_tensorlist(void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow__tensorlist_tensorlist(self, exponent); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow__tensorlist_scalar)(void* self, void* exponent); + HOST_API void* lantern__foreach_pow__tensorlist_scalar(void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow__tensorlist_scalar(self, exponent); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow__tensorlist_arrayrefscalar)(void* self, void* exponent); + HOST_API void* lantern__foreach_pow__tensorlist_arrayrefscalar(void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow__tensorlist_arrayrefscalar(self, exponent); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_reciprocal_tensorlist)(void* self); HOST_API void* lantern__foreach_reciprocal_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_reciprocal_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_reciprocal__tensorlist)(void* self); HOST_API void* lantern__foreach_reciprocal__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_reciprocal__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_round_tensorlist)(void* self); + HOST_API void* lantern__foreach_round_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_round_tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_round__tensorlist)(void* self); + HOST_API void* lantern__foreach_round__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_round__tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_sigmoid_tensorlist)(void* self); HOST_API void* lantern__foreach_sigmoid_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sigmoid_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_sigmoid__tensorlist)(void* self); HOST_API void* lantern__foreach_sigmoid__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sigmoid__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_sign_tensorlist)(void* self); + HOST_API void* lantern__foreach_sign_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sign_tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_sign__tensorlist)(void* self); + HOST_API void* lantern__foreach_sign__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sign__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR 
_lantern__foreach_sin_tensorlist)(void* self); + HOST_API void* lantern__foreach_sin_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sin_tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_sin__tensorlist)(void* self); + HOST_API void* lantern__foreach_sin__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sin__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_sinh_tensorlist)(void* self); + HOST_API void* lantern__foreach_sinh_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sinh_tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_sinh__tensorlist)(void* self); + HOST_API void* lantern__foreach_sinh__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sinh__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_sqrt_tensorlist)(void* self); + HOST_API void* lantern__foreach_sqrt_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sqrt_tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_sqrt__tensorlist)(void* self); + HOST_API void* lantern__foreach_sqrt__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sqrt__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_tan_tensorlist)(void* self); + HOST_API void* lantern__foreach_tan_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tan_tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_tan__tensorlist)(void* self); + HOST_API void* lantern__foreach_tan__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tan__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_tanh_tensorlist)(void* self); + HOST_API void* lantern__foreach_tanh_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tanh_tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_tanh__tensorlist)(void* self); + HOST_API void* lantern__foreach_tanh__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tanh__tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_trunc_tensorlist)(void* self); HOST_API void* lantern__foreach_trunc_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_trunc_tensorlist(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_trunc__tensorlist)(void* self); HOST_API void* lantern__foreach_trunc__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_trunc__tensorlist(self); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value); - HOST_API void* lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR 
_lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value); - HOST_API void* lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars); - HOST_API void* lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars); - HOST_API void* lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars); - HOST_API void* lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars); - HOST_API void* lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value); - HOST_API void* lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar)(void* self, void* tensor1, void* tensor2, void* value); - HOST_API void* lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar(self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars); - HOST_API void* lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* 
tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars); - HOST_API void* lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* self, void* tensor1, void* tensor2, void* scalars); - HOST_API void* lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor)(void* self, void* tensor1, void* tensor2, void* scalars); - HOST_API void* lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor(self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_norm_tensorlist_scalar)(void* self, void* ord); - HOST_API void* lantern__foreach_norm_tensorlist_scalar(void* self, void* ord) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_norm_tensorlist_scalar(self, ord); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_tensorlist_tensorlist_tensorlist)(void* self, void* tensors1, void* weights); - HOST_API void* lantern__foreach_lerp_tensorlist_tensorlist_tensorlist(void* self, void* tensors1, void* weights) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_tensorlist_tensorlist_tensorlist(self, tensors1, weights); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp__tensorlist_tensorlist_tensorlist)(void* self, void* tensors1, void* weights); - HOST_API void* lantern__foreach_lerp__tensorlist_tensorlist_tensorlist(void* self, void* tensors1, void* weights) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp__tensorlist_tensorlist_tensorlist(self, tensors1, weights); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_tensorlist_tensorlist_scalar)(void* self, void* tensors1, void* weight); - HOST_API void* lantern__foreach_lerp_tensorlist_tensorlist_scalar(void* self, void* tensors1, void* weight) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_tensorlist_tensorlist_scalar(self, tensors1, weight); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp__tensorlist_tensorlist_scalar)(void* self, void* tensors1, void* weight); - HOST_API void* lantern__foreach_lerp__tensorlist_tensorlist_scalar(void* self, void* tensors1, void* weight) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp__tensorlist_tensorlist_scalar(self, tensors1, weight); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* 
(LANTERN_PTR _lantern__foreach_zero__tensorlist)(void* self); + HOST_API void* lantern__foreach_zero__tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_zero__tensorlist(self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_copy__tensorlist_tensorlist_bool)(void* self, void* src, void* non_blocking); + HOST_API void* lantern__foreach_copy__tensorlist_tensorlist_bool(void* self, void* src, void* non_blocking) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_copy__tensorlist_tensorlist_bool(self, src, non_blocking); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_copy_tensorlist_tensorlist_bool)(void* self, void* src, void* non_blocking); + HOST_API void* lantern__foreach_copy_tensorlist_tensorlist_bool(void* self, void* src, void* non_blocking) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_copy_tensorlist_tensorlist_bool(self, src, non_blocking); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_bucketize_tensor_tensor_bool_bool)(void* self, void* boundaries, void* out_int32, void* right); HOST_API void* lantern_bucketize_tensor_tensor_bool_bool(void* self, void* boundaries, void* out_int32, void* right) { LANTERN_CHECK_LOADED void* ret = _lantern_bucketize_tensor_tensor_bool_bool(self, boundaries, out_int32, right); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_bucketize_out_tensor_tensor_tensor_bool_bool)(void* out, void* self, void* boundaries, void* out_int32, void* right); @@ -7272,6 +7498,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_searchsorted_out_tensor_tensor_tensor_bool_bool_cstringview_tensor(void* out, void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter) { LANTERN_CHECK_LOADED void* ret = _lantern_searchsorted_out_tensor_tensor_tensor_bool_bool_cstringview_tensor(out, sorted_sequence, self, out_int32, right, side, sorter); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_searchsorted_tensor_scalar_bool_bool_cstringview_tensor)(void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter); HOST_API void* lantern_searchsorted_tensor_scalar_bool_bool_cstringview_tensor(void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter) { LANTERN_CHECK_LOADED void* ret = _lantern_searchsorted_tensor_scalar_bool_bool_cstringview_tensor(sorted_sequence, self, out_int32, right, side, sorter); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor)(void* out, void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter); + HOST_API void* lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor(void* out, void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter) { LANTERN_CHECK_LOADED void* ret = _lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor(out, sorted_sequence, self, out_int32, right, side, sorter); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__convert_indices_from_coo_to_csr_tensor_intt_bool)(void* self, void* size, void* out_int32); HOST_API void* lantern__convert_indices_from_coo_to_csr_tensor_intt_bool(void* self, void* size, void* out_int32) { LANTERN_CHECK_LOADED void* ret = 
_lantern__convert_indices_from_coo_to_csr_tensor_intt_bool(self, size, out_int32); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__convert_indices_from_coo_to_csr_out_tensor_tensor_intt_bool)(void* out, void* self, void* size, void* out_int32); @@ -8132,6 +8360,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_linalg_eig_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_linalg_eig_tensor(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_linalg_eig_out_tensor_tensor_tensor)(void* eigenvalues, void* eigenvectors, void* self); HOST_API void* lantern_linalg_eig_out_tensor_tensor_tensor(void* eigenvalues, void* eigenvectors, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_linalg_eig_out_tensor_tensor_tensor(eigenvalues, eigenvectors, self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__linalg_eigvals_tensor)(void* self); + HOST_API void* lantern__linalg_eigvals_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__linalg_eigvals_tensor(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_linalg_eigvals_tensor)(void* self); HOST_API void* lantern_linalg_eigvals_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_linalg_eigvals_tensor(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_linalg_eigvals_out_tensor_tensor)(void* out, void* self); @@ -8250,6 +8480,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_linalg_solve_ex_out_tensor_tensor_tensor_tensor_bool_bool(void* result, void* info, void* A, void* B, void* left, void* check_errors) { LANTERN_CHECK_LOADED void* ret = _lantern_linalg_solve_ex_out_tensor_tensor_tensor_tensor_bool_bool(result, info, A, B, left, check_errors); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_linalg_solve_tensor_tensor_bool)(void* A, void* B, void* left); HOST_API void* lantern_linalg_solve_tensor_tensor_bool(void* A, void* B, void* left) { LANTERN_CHECK_LOADED void* ret = _lantern_linalg_solve_tensor_tensor_bool(A, B, left); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__spsolve_tensor_tensor_bool)(void* A, void* B, void* left); + HOST_API void* lantern__spsolve_tensor_tensor_bool(void* A, void* B, void* left) { LANTERN_CHECK_LOADED void* ret = _lantern__spsolve_tensor_tensor_bool(A, B, left); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_linalg_solve_out_tensor_tensor_tensor_bool)(void* out, void* A, void* B, void* left); HOST_API void* lantern_linalg_solve_out_tensor_tensor_tensor_bool(void* out, void* A, void* B, void* left) { LANTERN_CHECK_LOADED void* ret = _lantern_linalg_solve_out_tensor_tensor_tensor_bool(out, A, B, left); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_linalg_tensorinv_tensor_intt)(void* self, void* ind); @@ -8292,6 +8524,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_nested_to_padded_tensor_tensor_double_intarrayref(void* self, void* padding, void* output_size) { LANTERN_CHECK_LOADED void* ret = _lantern_nested_to_padded_tensor_tensor_double_intarrayref(self, padding, output_size); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__test_serialization_subcmul_tensor_tensor_scalar)(void* self, void* other, void* alpha); HOST_API void* 
lantern__test_serialization_subcmul_tensor_tensor_scalar(void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__test_serialization_subcmul_tensor_tensor_scalar(self, other, alpha); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__test_parallel_materialize_tensor_intt_bool)(void* self, void* num_parallel, void* skip_first); + HOST_API void* lantern__test_parallel_materialize_tensor_intt_bool(void* self, void* num_parallel, void* skip_first) { LANTERN_CHECK_LOADED void* ret = _lantern__test_parallel_materialize_tensor_intt_bool(self, num_parallel, skip_first); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__test_optional_intlist_tensor_intarrayref)(void* values, void* addends); HOST_API void* lantern__test_optional_intlist_tensor_intarrayref(void* values, void* addends) { LANTERN_CHECK_LOADED void* ret = _lantern__test_optional_intlist_tensor_intarrayref(values, addends); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__test_optional_filled_intlist_tensor_intarrayref)(void* values, void* addends); @@ -8318,8 +8552,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_segment_reduce_tensor_cstringview_tensor_tensor_tensor_intt_bool_scalar(void* data, void* reduce, void* lengths, void* indices, void* offsets, void* axis, void* unsafe, void* initial) { LANTERN_CHECK_LOADED void* ret = _lantern_segment_reduce_tensor_cstringview_tensor_tensor_tensor_intt_bool_scalar(data, reduce, lengths, indices, offsets, axis, unsafe, initial); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__segment_reduce_backward_tensor_tensor_tensor_cstringview_tensor_tensor_intt_scalar)(void* grad, void* output, void* data, void* reduce, void* lengths, void* offsets, void* axis, void* initial); HOST_API void* lantern__segment_reduce_backward_tensor_tensor_tensor_cstringview_tensor_tensor_intt_scalar(void* grad, void* output, void* data, void* reduce, void* lengths, void* offsets, void* axis, void* initial) { LANTERN_CHECK_LOADED void* ret = _lantern__segment_reduce_backward_tensor_tensor_tensor_cstringview_tensor_tensor_intt_scalar(grad, output, data, reduce, lengths, offsets, axis, initial); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_pad_sequence_tensorlist_bool_double)(void* sequences, void* batch_first, void* padding_value); - HOST_API void* lantern_pad_sequence_tensorlist_bool_double(void* sequences, void* batch_first, void* padding_value) { LANTERN_CHECK_LOADED void* ret = _lantern_pad_sequence_tensorlist_bool_double(sequences, batch_first, padding_value); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_pad_sequence_tensorlist_bool_double_cstringview)(void* sequences, void* batch_first, void* padding_value, void* padding_side); + HOST_API void* lantern_pad_sequence_tensorlist_bool_double_cstringview(void* sequences, void* batch_first, void* padding_value, void* padding_side) { LANTERN_CHECK_LOADED void* ret = _lantern_pad_sequence_tensorlist_bool_double_cstringview(sequences, batch_first, padding_value, padding_side); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_flatten_dense_tensors_tensorlist)(void* tensors); HOST_API void* lantern_flatten_dense_tensors_tensorlist(void* tensors) { LANTERN_CHECK_LOADED void* ret = _lantern_flatten_dense_tensors_tensorlist(tensors); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR 
_lantern_unflatten_dense_tensors_tensor_tensorlist)(void* flat, void* tensors); @@ -8406,50 +8640,64 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_alias_copy_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_alias_copy_tensor(self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_Tensor_to_padded_tensor_tensor_double_intarrayref)(void* self, void* padding, void* output_size); HOST_API void* lantern_Tensor_to_padded_tensor_tensor_double_intarrayref(void* self, void* padding, void* output_size) { void* ret = _lantern_Tensor_to_padded_tensor_tensor_double_intarrayref(self, padding, output_size); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__jagged_to_padded_dense_forward_tensor_tensorlist_intarrayref_double)(void* values, void* offsets, void* max_lengths, void* padding_value); + HOST_API void* lantern__jagged_to_padded_dense_forward_tensor_tensorlist_intarrayref_double(void* values, void* offsets, void* max_lengths, void* padding_value) { LANTERN_CHECK_LOADED void* ret = _lantern__jagged_to_padded_dense_forward_tensor_tensorlist_intarrayref_double(values, offsets, max_lengths, padding_value); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__padded_dense_to_jagged_forward_tensor_tensorlist_intt)(void* dense, void* offsets, void* total_L); + HOST_API void* lantern__padded_dense_to_jagged_forward_tensor_tensorlist_intt(void* dense, void* offsets, void* total_L) { LANTERN_CHECK_LOADED void* ret = _lantern__padded_dense_to_jagged_forward_tensor_tensorlist_intt(dense, offsets, total_L); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__nested_tensor_softmax_with_shape_tensor_tensor)(void* self, void* query); HOST_API void* lantern__nested_tensor_softmax_with_shape_tensor_tensor(void* self, void* query) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_tensor_softmax_with_shape_tensor_tensor(self, query); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__safe_softmax_tensor_intt_scalartype)(void* self, void* dim, void* dtype); + HOST_API void* lantern__safe_softmax_tensor_intt_scalartype(void* self, void* dim, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__safe_softmax_tensor_intt_scalartype(self, dim, dtype); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__transformer_encoder_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt)(void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* mask_type); HOST_API void* lantern__transformer_encoder_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt(void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* mask_type) { LANTERN_CHECK_LOADED void* ret = 
_lantern__transformer_encoder_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__native_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_bool_bool_intt)(void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask, void* need_weights, void* average_attn_weights, void* mask_type); HOST_API void* lantern__native_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_bool_bool_intt(void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask, void* need_weights, void* average_attn_weights, void* mask_type) { LANTERN_CHECK_LOADED void* ret = _lantern__native_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_bool_bool_intt(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal); - HOST_API void* lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal) { LANTERN_CHECK_LOADED void* ret = _lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool(query, key, value, attn_mask, dropout_p, is_causal); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_bool)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* need_attn_weights, void* is_causal); - HOST_API void* lantern__scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* need_attn_weights, void* is_causal) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_bool(query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal); - HOST_API void* lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool(query, key, value, attn_mask, dropout_p, is_causal); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask); - HOST_API void* 
lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool)(void* query, void* key, void* value, void* dropout_p, void* is_causal, void* return_debug_mask); - HOST_API void* lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool(void* query, void* key, void* value, void* dropout_p, void* is_causal, void* return_debug_mask) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool(query, key, value, dropout_p, is_causal, return_debug_mask); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt)(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset); - HOST_API void* lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_bool_bool)(void* query, void* key, void* value, void* compute_log_sumexp, void* is_causal); - HOST_API void* lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_bool_bool(void* query, void* key, void* value, void* compute_log_sumexp, void* is_causal) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_bool_bool(query, key, value, compute_log_sumexp, is_causal); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool)(void* grad_out_, void* query, void* key, void* value, void* out, void* logsumexp, void* is_causal, void* chunk_grad_outputs); - HOST_API void* lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(void* grad_out_, void* query, void* key, void* value, void* out, void* logsumexp, void* is_causal, void* chunk_grad_outputs) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR 
_lantern__chunk_grad_outputs_efficient_attention_tensor_tensor_tensor_bool)(void* query, void* key, void* value, void* is_causal); - HOST_API void* lantern__chunk_grad_outputs_efficient_attention_tensor_tensor_tensor_bool(void* query, void* key, void* value, void* is_causal) { LANTERN_CHECK_LOADED void* ret = _lantern__chunk_grad_outputs_efficient_attention_tensor_tensor_tensor_bool(query, key, value, is_causal); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool)(void* query, void* key, void* value, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* return_debug_mask); - HOST_API void* lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool(void* query, void* key, void* value, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* return_debug_mask) { LANTERN_CHECK_LOADED void* ret = _lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt)(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset); - HOST_API void* lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset) { LANTERN_CHECK_LOADED void* ret = _lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_bool_bool)(void* query, void* key, void* value, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* compute_log_sumexp, void* causal); - HOST_API void* lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_bool_bool(void* query, void* key, void* value, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* compute_log_sumexp, void* causal) { LANTERN_CHECK_LOADED void* ret = _lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_bool_bool(query, key, value, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, compute_log_sumexp, causal); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool)(void* grad_out_, void* query, void* key, void* value, void* out, void* logsumexp, void* is_causal, void* chunk_grad_outputs); - HOST_API void* lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(void* grad_out_, void* query, void* key, void* value, void* out, void* logsumexp, void* is_causal, void* chunk_grad_outputs) 
{ LANTERN_CHECK_LOADED void* ret = _lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_double_bool)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* scale, void* enable_gqa); + HOST_API void* lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* scale, void* enable_gqa) { LANTERN_CHECK_LOADED void* ret = _lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_double_bool(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool_double_bool)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* scale, void* enable_gqa); + HOST_API void* lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* scale, void* enable_gqa) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool_double_bool(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor_double_bool)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask, void* scale, void* enable_gqa); + HOST_API void* lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask, void* scale, void* enable_gqa) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor_double_bool(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale, enable_gqa); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_attention_math_for_mps_tensor_tensor_tensor_tensor_double_bool_tensor_double)(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask, void* scale); + HOST_API void* lantern__scaled_dot_product_attention_math_for_mps_tensor_tensor_tensor_tensor_double_bool_tensor_double(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_attention_math_for_mps_tensor_tensor_tensor_tensor_double_bool_tensor_double(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool_double)(void* query, void* key, void* value, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale); + HOST_API void* lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool_double(void* query, void* key, void* value, void* dropout_p, 
void* is_causal, void* return_debug_mask, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool_double(query, key, value, dropout_p, is_causal, return_debug_mask, scale); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_flash_attention_for_cpu_tensor_tensor_tensor_double_bool_tensor_double)(void* query, void* key, void* value, void* dropout_p, void* is_causal, void* attn_mask, void* scale); + HOST_API void* lantern__scaled_dot_product_flash_attention_for_cpu_tensor_tensor_tensor_double_bool_tensor_double(void* query, void* key, void* value, void* dropout_p, void* is_causal, void* attn_mask, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_flash_attention_for_cpu_tensor_tensor_tensor_double_bool_tensor_double(query, key, value, dropout_p, is_causal, attn_mask, scale); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_fused_attention_overrideable_tensor_tensor_tensor_tensor_double_bool_bool_double)(void* query, void* key, void* value, void* attn_bias, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale); + HOST_API void* lantern__scaled_dot_product_fused_attention_overrideable_tensor_tensor_tensor_tensor_double_bool_bool_double(void* query, void* key, void* value, void* attn_bias, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_fused_attention_overrideable_tensor_tensor_tensor_tensor_double_bool_bool_double(query, key, value, attn_bias, dropout_p, is_causal, return_debug_mask, scale); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double)(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale); + HOST_API void* lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_flash_attention_for_cpu_backward_tensor_tensor_tensor_tensor_tensor_tensor_double_bool_tensor_double)(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* dropout_p, void* is_causal, void* attn_mask, void* scale); + HOST_API void* lantern__scaled_dot_product_flash_attention_for_cpu_backward_tensor_tensor_tensor_tensor_tensor_tensor_double_bool_tensor_double(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* dropout_p, void* is_causal, void* attn_mask, void* scale) { LANTERN_CHECK_LOADED void* 
ret = _lantern__scaled_dot_product_flash_attention_for_cpu_backward_tensor_tensor_tensor_tensor_tensor_tensor_double_bool_tensor_double(grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask, scale); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_fused_attention_overrideable_backward_tensor_tensor_tensor_tensor_tensor_stdarraybool_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double)(void* grad_out, void* query, void* key, void* value, void* attn_bias, void* grad_input_mask, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale); + HOST_API void* lantern__scaled_dot_product_fused_attention_overrideable_backward_tensor_tensor_tensor_tensor_tensor_stdarraybool_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double(void* grad_out, void* query, void* key, void* value, void* attn_bias, void* grad_input_mask, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_fused_attention_overrideable_backward_tensor_tensor_tensor_tensor_tensor_stdarraybool_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double(grad_out, query, key, value, attn_bias, grad_input_mask, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_tensor_bool_double_bool_double)(void* query, void* key, void* value, void* attn_bias, void* compute_log_sumexp, void* dropout_p, void* is_causal, void* scale); + HOST_API void* lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_tensor_bool_double_bool_double(void* query, void* key, void* value, void* attn_bias, void* compute_log_sumexp, void* dropout_p, void* is_causal, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_tensor_bool_double_bool_double(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, scale); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_stdarraybool_bool_double)(void* grad_out_, void* query, void* key, void* value, void* attn_bias, void* out, void* logsumexp, void* philox_seed, void* philox_offset, void* dropout_p, void* grad_input_mask, void* is_causal, void* scale); + HOST_API void* lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_stdarraybool_bool_double(void* grad_out_, void* query, void* key, void* value, void* attn_bias, void* out, void* logsumexp, void* philox_seed, void* philox_offset, void* dropout_p, void* grad_input_mask, void* is_causal, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_stdarraybool_bool_double(grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal, scale); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* 
(LANTERN_PTR _lantern__scaled_dot_product_cudnn_attention_tensor_tensor_tensor_tensor_bool_double_bool_bool_double)(void* query, void* key, void* value, void* attn_bias, void* compute_log_sumexp, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale); + HOST_API void* lantern__scaled_dot_product_cudnn_attention_tensor_tensor_tensor_tensor_bool_double_bool_bool_double(void* query, void* key, void* value, void* attn_bias, void* compute_log_sumexp, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_cudnn_attention_tensor_tensor_tensor_tensor_bool_double_bool_bool_double(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, scale); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__scaled_dot_product_cudnn_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_double)(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* philox_seed, void* philox_offset, void* attn_bias, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* scale); + HOST_API void* lantern__scaled_dot_product_cudnn_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_double(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* philox_seed, void* philox_offset, void* attn_bias, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* scale) { LANTERN_CHECK_LOADED void* ret = _lantern__scaled_dot_product_cudnn_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_double(grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool_double_intt_intt_tensor_tensor)(void* query, void* key, void* value, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale, void* window_size_left, void* window_size_right, void* seqused_k, void* alibi_slopes); + HOST_API void* lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool_double_intt_intt_tensor_tensor(void* query, void* key, void* value, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale, void* window_size_left, void* window_size_right, void* seqused_k, void* alibi_slopes) { LANTERN_CHECK_LOADED void* ret = _lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool_double_intt_intt_tensor_tensor(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale, window_size_left, window_size_right, seqused_k, alibi_slopes); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double_intt_intt)(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* 
dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale, void* window_size_left, void* window_size_right); + HOST_API void* lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double_intt_intt(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale, void* window_size_left, void* window_size_right) { LANTERN_CHECK_LOADED void* ret = _lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double_intt_intt(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale, window_size_left, window_size_right); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_intt_bool_double_tensor_intt)(void* query, void* key, void* value, void* bias, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* max_seqlen_k, void* dropout_p, void* custom_mask_type, void* compute_log_sumexp, void* scale, void* seqlen_k, void* window_size); + HOST_API void* lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_intt_bool_double_tensor_intt(void* query, void* key, void* value, void* bias, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* max_seqlen_k, void* dropout_p, void* custom_mask_type, void* compute_log_sumexp, void* scale, void* seqlen_k, void* window_size) { LANTERN_CHECK_LOADED void* ret = _lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_intt_bool_double_tensor_intt(query, key, value, bias, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, custom_mask_type, compute_log_sumexp, scale, seqlen_k, window_size); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_double_tensor_tensor_intt_bool_double_intt_intt_bool)(void* grad_out_, void* query, void* key, void* value, void* bias, void* out, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* max_seqlen_k, void* logsumexp, void* dropout_p, void* philox_seed, void* philox_offset, void* custom_mask_type, void* bias_requires_grad, void* scale, void* num_splits_key, void* window_size, void* shared_storage_dqdkdv); + HOST_API void* lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_double_tensor_tensor_intt_bool_double_intt_intt_bool(void* grad_out_, void* query, void* key, void* value, void* bias, void* out, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* max_seqlen_k, void* logsumexp, void* dropout_p, void* philox_seed, void* philox_offset, void* custom_mask_type, void* bias_requires_grad, void* scale, void* num_splits_key, void* window_size, void* shared_storage_dqdkdv) { LANTERN_CHECK_LOADED void* ret = _lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_double_tensor_tensor_intt_bool_double_intt_intt_bool(grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, 
philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key, window_size, shared_storage_dqdkdv); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__triton_scaled_dot_attention_tensor_tensor_tensor_double)(void* q, void* k, void* v, void* dropout_p);
HOST_API void* lantern__triton_scaled_dot_attention_tensor_tensor_tensor_double(void* q, void* k, void* v, void* dropout_p) { LANTERN_CHECK_LOADED void* ret = _lantern__triton_scaled_dot_attention_tensor_tensor_tensor_double(q, k, v, dropout_p); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fill_mem_eff_dropout_mask__tensor_double_intt_intt)(void* self, void* dropout_p, void* seed, void* offset);
+ HOST_API void* lantern__fill_mem_eff_dropout_mask__tensor_double_intt_intt(void* self, void* dropout_p, void* seed, void* offset) { LANTERN_CHECK_LOADED void* ret = _lantern__fill_mem_eff_dropout_mask__tensor_double_intt_intt(self, dropout_p, seed, offset); LANTERN_HOST_HANDLER return ret; }
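// [Editor's note] The scaled-dot-product-attention block above replaces the
// older, narrower entry points removed earlier in this hunk: upstream ATen
// appended an optional `scale` across the SDPA family, added an `enable_gqa`
// flag to the public entry point, and introduced backend-specific kernels
// (`_for_cpu`, `_cudnn`, `_fused_attention_overrideable`), so the bindings
// are regenerated with the wider type-mangled suffixes. A hedged sketch of a
// call through the new public wrapper; the `box_*` argument-packing helpers
// are illustrative assumptions, not part of this header:
//
//   void* out = lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_double_bool(
//       query, key, value, attn_mask,  // opaque tensor handles (void*)
//       box_double(0.0),               // dropout_p
//       box_bool(false),               // is_causal
//       box_optional_double(),         // scale; empty selects 1/sqrt(head_dim)
//       box_bool(false));              // enable_gqa (grouped-query attention)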
LANTERN_API void* (LANTERN_PTR _lantern__triton_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor)(void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask);
HOST_API void* lantern__triton_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor(void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask) { LANTERN_CHECK_LOADED void* ret = _lantern__triton_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_special_airy_ai_tensor)(void* x);
HOST_API void* lantern_special_airy_ai_tensor(void* x) { LANTERN_CHECK_LOADED void* ret = _lantern_special_airy_ai_tensor(x); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_special_airy_ai_out_tensor_tensor)(void* out, void* x);
HOST_API void* lantern_special_airy_ai_out_tensor_tensor(void* out, void* x) { LANTERN_CHECK_LOADED void* ret = _lantern_special_airy_ai_out_tensor_tensor(out, x); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__transformer_decoder_only_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor)(void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* incr_key, void* incr_value);
- HOST_API void* lantern__transformer_decoder_only_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* incr_key, void* incr_value) { LANTERN_CHECK_LOADED void* ret = _lantern__transformer_decoder_only_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__native_decoder_only_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool)(void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask, void* incr_key, void* incr_value, void* need_weights, void* average_attn_weights);
- HOST_API void* lantern__native_decoder_only_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask, void* incr_key, void* incr_value, void* need_weights, void* average_attn_weights) { LANTERN_CHECK_LOADED void* ret = _lantern__native_decoder_only_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_special_bessel_j0_tensor)(void* self);
HOST_API void* lantern_special_bessel_j0_tensor(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_special_bessel_j0_tensor(self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_special_bessel_j0_out_tensor_tensor)(void* out, void* self);
@@ -8642,8 +8890,20 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern__foobar_tensor_bool_bool_bool(void* self, void* arg1, void* arg2, void* arg3) { LANTERN_CHECK_LOADED void* ret = _lantern__foobar_tensor_bool_bool_bool(self, arg1, arg2, arg3); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
HOST_API void* lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR
_lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf); + HOST_API void* lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf); HOST_API void* lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf); + HOST_API void* lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR 
_lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor)(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor)(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_adagrad__tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor)(void* self, void* grads, void* state_sums, void* state_steps, void* lr, void* lr_decay, void* weight_decay, void* eps, void* maximize, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_adagrad__tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(void* self, void* grads, void* state_sums, void* state_steps, void* lr, void* lr_decay, void* weight_decay, void* eps, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adagrad__tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__propagate_xla_data_tensor_tensor)(void* input, void* output);
+ HOST_API void* lantern__propagate_xla_data_tensor_tensor(void* input, void* output) { LANTERN_CHECK_LOADED void* ret = _lantern__propagate_xla_data_tensor_tensor(input, output); LANTERN_HOST_HANDLER return ret; }
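// [Editor's note] The fused Adam, AdamW and SGD steps above each gain a twin
// overload that differs only in the `lr` slot: the existing form takes a
// host-side double, while the new variant (note `tensor` in that position of
// the suffix) accepts a 0-dim tensor so the learning rate can stay on device,
// e.g. under CUDA graph capture. `_fused_adagrad_` and `_propagate_xla_data`
// are new entries altogether. Illustrative comparison only; `box_double` and
// `lr_tensor` are assumed names and the long suffixes are abbreviated
// with `...`:
//
//   // scalar learning rate (pre-existing entry point)
//   lantern__fused_adam__tensorlist_..._double_double_double_double_double_bool_bool_tensor_tensor(
//       params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps,
//       box_double(1e-3), beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
//   // tensor learning rate (new entry point; lr passed as a 0-dim tensor handle)
//   lantern__fused_adam__tensorlist_..._tensor_double_double_double_double_bool_bool_tensor_tensor(
//       params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps,
//       lr_tensor, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);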
LANTERN_API void* (LANTERN_PTR _lantern__new_zeros_with_same_feature_meta_out_tensor_tensor_tensor_intt)(void* out, void* self, void* other, void* self_num_batch_dims);
HOST_API void* lantern__new_zeros_with_same_feature_meta_out_tensor_tensor_tensor_intt(void* out, void* self, void* other, void* self_num_batch_dims) { LANTERN_CHECK_LOADED void* ret = _lantern__new_zeros_with_same_feature_meta_out_tensor_tensor_tensor_intt(out, self, other, self_num_batch_dims); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__cudnn_ctc_loss_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intt_bool_bool)(void* out0, void* out1, void* log_probs, void* targets, void* input_lengths, void* target_lengths, void* blank, void* deterministic, void* zero_infinity);
@@ -8672,6 +8932,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_add_out_tensor_tensor_scalar_scalar(void* out, void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern_add_out_tensor_tensor_scalar_scalar(out, self, other, alpha); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_affine_grid_generator_out_tensor_tensor_intarrayref_bool)(void* out, void* theta, void* size, void* align_corners);
HOST_API void* lantern_affine_grid_generator_out_tensor_tensor_intarrayref_bool(void* out, void* theta, void* size, void* align_corners) { LANTERN_CHECK_LOADED void* ret = _lantern_affine_grid_generator_out_tensor_tensor_intarrayref_bool(out, theta, size, align_corners); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__test_functorch_fallback_out_tensor_tensor_tensor)(void* out, void* self, void* other);
+ HOST_API void* lantern__test_functorch_fallback_out_tensor_tensor_tensor(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__test_functorch_fallback_out_tensor_tensor_tensor(out, self, other); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_bartlett_window_out_tensor_intt)(void* out, void* window_length);
HOST_API void* lantern_bartlett_window_out_tensor_intt(void* out, void* window_length) { LANTERN_CHECK_LOADED void* ret = _lantern_bartlett_window_out_tensor_intt(out, window_length); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_bartlett_window_out_tensor_intt_bool)(void* out, void* window_length, void* periodic);
@@ -8726,8 +8988,6 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern_cudnn_batch_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double(void* out0, void* out1, void* out2, void* out3, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* exponential_average_factor, void* epsilon) { LANTERN_CHECK_LOADED void* ret = _lantern_cudnn_batch_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double(out0, out1, out2, out3, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_cudnn_batch_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_tensor)(void* out0, void* out1, void* out2, void* input, void* grad_output, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_var, void* epsilon, void* reserveSpace);
HOST_API void* lantern_cudnn_batch_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_tensor(void* out0, void* out1, void* out2, void* input, void* grad_output, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_var, void* epsilon, void* reserveSpace) { LANTERN_CHECK_LOADED void* ret =
_lantern_cudnn_batch_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_tensor(out0, out1, out2, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)(void* out, void* self, void* weight, void* padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32); - HOST_API void* lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(void* out, void* self, void* weight, void* padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32) { LANTERN_CHECK_LOADED void* ret = _lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(out, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_cudnn_convolution_transpose_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)(void* out, void* self, void* weight, void* padding, void* output_padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32); HOST_API void* lantern_cudnn_convolution_transpose_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(void* out, void* self, void* weight, void* padding, void* output_padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32) { LANTERN_CHECK_LOADED void* ret = _lantern_cudnn_convolution_transpose_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(out, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__mps_convolution_transpose_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt)(void* out, void* self, void* weight, void* padding, void* output_padding, void* stride, void* dilation, void* groups); @@ -8774,6 +9034,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__embedding_bag_per_sample_weights_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt(void* out, void* grad, void* weight, void* indices, void* offsets, void* offset2bag, void* mode, void* padding_idx) { LANTERN_CHECK_LOADED void* ret = _lantern__embedding_bag_per_sample_weights_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt(out, grad, weight, indices, offsets, offset2bag, mode, padding_idx); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_empty_out_tensor_intarrayref_dimnamelist_memoryformat)(void* out, void* size, void* names, void* memory_format); HOST_API void* lantern_empty_out_tensor_intarrayref_dimnamelist_memoryformat(void* out, void* size, void* names, void* memory_format) { LANTERN_CHECK_LOADED void* ret = _lantern_empty_out_tensor_intarrayref_dimnamelist_memoryformat(out, size, names, memory_format); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_empty_permuted_out_tensor_intarrayref_intarrayref)(void* out, void* size, void* physical_layout); + HOST_API void* 
lantern_empty_permuted_out_tensor_intarrayref_intarrayref(void* out, void* size, void* physical_layout) { LANTERN_CHECK_LOADED void* ret = _lantern_empty_permuted_out_tensor_intarrayref_intarrayref(out, size, physical_layout); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_new_empty_out_tensor_tensor_intarrayref)(void* out, void* self, void* size); HOST_API void* lantern_new_empty_out_tensor_tensor_intarrayref(void* out, void* self, void* size) { LANTERN_CHECK_LOADED void* ret = _lantern_new_empty_out_tensor_tensor_intarrayref(out, self, size); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_new_empty_strided_out_tensor_tensor_intarrayref_intarrayref)(void* out, void* self, void* size, void* stride); @@ -8806,6 +9068,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_fill_out_tensor_tensor_scalar(void* out, void* self, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern_fill_out_tensor_tensor_scalar(out, self, value); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_fill_out_tensor_tensor_tensor)(void* out, void* self, void* value); HOST_API void* lantern_fill_out_tensor_tensor_tensor(void* out, void* self, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern_fill_out_tensor_tensor_tensor(out, self, value); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_floor_divide_out_tensor_tensor_scalar)(void* out, void* self, void* other); + HOST_API void* lantern_floor_divide_out_tensor_tensor_scalar(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern_floor_divide_out_tensor_tensor_scalar(out, self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_full_out_tensor_intarrayref_scalar_dimnamelist)(void* out, void* size, void* fill_value, void* names); HOST_API void* lantern_full_out_tensor_intarrayref_scalar_dimnamelist(void* out, void* size, void* fill_value, void* names) { LANTERN_CHECK_LOADED void* ret = _lantern_full_out_tensor_intarrayref_scalar_dimnamelist(out, size, fill_value, names); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_full_like_out_tensor_tensor_scalar_memoryformat)(void* out, void* self, void* fill_value, void* memory_format); @@ -8844,12 +9108,12 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_native_group_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_intt_intt_double(void* out0, void* out1, void* out2, void* input, void* weight, void* bias, void* N, void* C, void* HxW, void* group, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern_native_group_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_intt_intt_double(out0, out1, out2, input, weight, bias, N, C, HxW, group, eps); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_native_group_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_intt_intt_stdarraybool)(void* out0, void* out1, void* out2, void* grad_out, void* input, void* mean, void* rstd, void* weight, void* N, void* C, void* HxW, void* group, void* output_mask); HOST_API void* lantern_native_group_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_intt_intt_stdarraybool(void* out0, void* out1, void* out2, void* grad_out, void* input, void* mean, void* rstd, void* weight, void* N, void* C, void* HxW, void* group, void* output_mask) { 
LANTERN_CHECK_LOADED void* ret = _lantern_native_group_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_intt_intt_stdarraybool(out0, out1, out2, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_index_put_out_tensor_tensor_constclistcoptionaltensor_tensor_bool)(void* out, void* self, void* indices, void* values, void* accumulate); - HOST_API void* lantern_index_put_out_tensor_tensor_constclistcoptionaltensor_tensor_bool(void* out, void* self, void* indices, void* values, void* accumulate) { LANTERN_CHECK_LOADED void* ret = _lantern_index_put_out_tensor_tensor_constclistcoptionaltensor_tensor_bool(out, self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__index_put_impl_out_tensor_tensor_constclistcoptionaltensor_tensor_bool_bool)(void* out, void* self, void* indices, void* values, void* accumulate, void* unsafe); - HOST_API void* lantern__index_put_impl_out_tensor_tensor_constclistcoptionaltensor_tensor_bool_bool(void* out, void* self, void* indices, void* values, void* accumulate, void* unsafe) { LANTERN_CHECK_LOADED void* ret = _lantern__index_put_impl_out_tensor_tensor_constclistcoptionaltensor_tensor_bool_bool(out, self, indices, values, accumulate, unsafe); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__index_put_impl_tensor_constclistcoptionaltensor_tensor_bool_bool)(void* self, void* indices, void* values, void* accumulate, void* unsafe); - HOST_API void* lantern__index_put_impl_tensor_constclistcoptionaltensor_tensor_bool_bool(void* self, void* indices, void* values, void* accumulate, void* unsafe) { LANTERN_CHECK_LOADED void* ret = _lantern__index_put_impl_tensor_constclistcoptionaltensor_tensor_bool_bool(self, indices, values, accumulate, unsafe); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_index_put_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool)(void* out, void* self, void* indices, void* values, void* accumulate); + HOST_API void* lantern_index_put_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool(void* out, void* self, void* indices, void* values, void* accumulate) { LANTERN_CHECK_LOADED void* ret = _lantern_index_put_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool(out, self, indices, values, accumulate); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__index_put_impl_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool_bool)(void* out, void* self, void* indices, void* values, void* accumulate, void* unsafe); + HOST_API void* lantern__index_put_impl_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool_bool(void* out, void* self, void* indices, void* values, void* accumulate, void* unsafe) { LANTERN_CHECK_LOADED void* ret = _lantern__index_put_impl_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool_bool(out, self, indices, values, accumulate, unsafe); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__index_put_impl_tensor_constcliststdoptionaltensor_tensor_bool_bool)(void* self, void* indices, void* values, void* accumulate, void* unsafe); + HOST_API void* lantern__index_put_impl_tensor_constcliststdoptionaltensor_tensor_bool_bool(void* self, void* indices, void* values, void* accumulate, void* unsafe) { LANTERN_CHECK_LOADED void* ret = _lantern__index_put_impl_tensor_constcliststdoptionaltensor_tensor_bool_bool(self, 
indices, values, accumulate, unsafe); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_isnan_out_tensor_tensor)(void* out, void* self); HOST_API void* lantern_isnan_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_isnan_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_native_layer_norm_out_tensor_tensor_tensor_tensor_intarrayref_tensor_tensor_double)(void* out0, void* out1, void* out2, void* input, void* normalized_shape, void* weight, void* bias, void* eps); @@ -8886,6 +9150,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_quantized_max_pool1d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* out, void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_CHECK_LOADED void* ret = _lantern_quantized_max_pool1d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(out, self, kernel_size, stride, padding, dilation, ceil_mode); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_quantized_max_pool2d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)(void* out, void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode); HOST_API void* lantern_quantized_max_pool2d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* out, void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_CHECK_LOADED void* ret = _lantern_quantized_max_pool2d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(out, self, kernel_size, stride, padding, dilation, ceil_mode); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_quantized_max_pool3d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)(void* out, void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode); + HOST_API void* lantern_quantized_max_pool3d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* out, void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_CHECK_LOADED void* ret = _lantern_quantized_max_pool3d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(out, self, kernel_size, stride, padding, dilation, ceil_mode); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_median_out_tensor_tensor)(void* out, void* self); HOST_API void* lantern_median_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern_median_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_nanmedian_out_tensor_tensor)(void* out, void* self); @@ -8920,6 +9186,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_mul_out_tensor_tensor_scalar(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern_mul_out_tensor_tensor_scalar(out, self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__native_batch_norm_legit_functional_tensor_tensor_tensor_tensor_tensor_bool_double_double)(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* momentum, void* eps); HOST_API void* 
lantern__native_batch_norm_legit_functional_tensor_tensor_tensor_tensor_tensor_bool_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__native_batch_norm_legit_functional_tensor_tensor_tensor_tensor_tensor_bool_double_double(input, weight, bias, running_mean, running_var, training, momentum, eps); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__native_batch_norm_legit_no_training_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double)(void* out0, void* out1, void* out2, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps); + HOST_API void* lantern__native_batch_norm_legit_no_training_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(void* out0, void* out1, void* out2, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__native_batch_norm_legit_no_training_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(out0, out1, out2, input, weight, bias, running_mean, running_var, momentum, eps); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_stats_out_tensor_tensor_tensor_double)(void* out0, void* out1, void* input, void* eps); HOST_API void* lantern_batch_norm_stats_out_tensor_tensor_tensor_double(void* out0, void* out1, void* input, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_stats_out_tensor_tensor_tensor_double(out0, out1, input, eps); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_gather_stats_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double_intt)(void* out0, void* out1, void* input, void* mean, void* invstd, void* running_mean, void* running_var, void* momentum, void* eps, void* count); @@ -8930,8 +9198,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_native_batch_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool(void* out0, void* out1, void* out2, void* grad_out, void* input, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_invstd, void* train, void* eps, void* output_mask) { LANTERN_CHECK_LOADED void* ret = _lantern_native_batch_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool(out0, out1, out2, grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_backward_reduce_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool_bool)(void* out0, void* out1, void* out2, void* out3, void* grad_out, void* input, void* mean, void* invstd, void* weight, void* input_g, void* weight_g, void* bias_g); HOST_API void* lantern_batch_norm_backward_reduce_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool_bool(void* out0, void* out1, void* out2, void* out3, void* grad_out, void* input, void* mean, void* invstd, void* weight, void* input_g, void* weight_g, void* bias_g) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_backward_reduce_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool_bool(out0, out1, out2, out3, 
grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor)(void* out, void* grad_out, void* input, void* mean, void* invstd, void* weight, void* mean_dy, void* mean_dy_xmu, void* count); - HOST_API void* lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* out, void* grad_out, void* input, void* mean, void* invstd, void* weight, void* mean_dy, void* mean_dy_xmu, void* count) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(out, grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor)(void* out, void* grad_out, void* input, void* mean, void* invstd, void* weight, void* sum_dy, void* sum_dy_xmu, void* count); + HOST_API void* lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* out, void* grad_out, void* input, void* mean, void* invstd, void* weight, void* sum_dy, void* sum_dy_xmu, void* count) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(out, grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_batch_norm_update_stats_out_tensor_tensor_tensor_tensor_tensor_double)(void* out0, void* out1, void* input, void* running_mean, void* running_var, void* momentum); HOST_API void* lantern_batch_norm_update_stats_out_tensor_tensor_tensor_tensor_tensor_double(void* out0, void* out1, void* input, void* running_mean, void* running_var, void* momentum) { LANTERN_CHECK_LOADED void* ret = _lantern_batch_norm_update_stats_out_tensor_tensor_tensor_tensor_tensor_double(out0, out1, input, running_mean, running_var, momentum); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__nnpack_spatial_convolution_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref)(void* out, void* input, void* weight, void* bias, void* padding, void* stride); @@ -9004,8 +9272,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_unsafe_split_with_sizes_out_tensorlist_tensor_intarrayref_intt(void* out, void* self, void* split_sizes, void* dim) { LANTERN_CHECK_LOADED void* ret = _lantern_unsafe_split_with_sizes_out_tensorlist_tensor_intarrayref_intt(out, self, split_sizes, dim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_sum_out_tensor_tensor_scalartype)(void* out, void* self, void* dtype); HOST_API void* lantern_sum_out_tensor_tensor_scalartype(void* out, void* self, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_sum_out_tensor_tensor_scalartype(out, self, dtype); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_std_mean_out_tensor_tensor_tensor_intarrayref_intt_bool)(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim); - HOST_API void* lantern_std_mean_out_tensor_tensor_tensor_intarrayref_intt_bool(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = 
_lantern_std_mean_out_tensor_tensor_tensor_intarrayref_intt_bool(out0, out1, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_std_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool)(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim); + HOST_API void* lantern_std_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_std_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool(out0, out1, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_prod_out_tensor_tensor_scalartype)(void* out, void* self, void* dtype); HOST_API void* lantern_prod_out_tensor_tensor_scalartype(void* out, void* self, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_prod_out_tensor_tensor_scalartype(out, self, dtype); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__mkldnn_transpose_out_tensor_tensor_intt_intt)(void* out, void* self, void* dim0, void* dim1); @@ -9026,10 +9294,16 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__nested_tensor_size_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_tensor_size_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__nested_tensor_strides_out_tensor_tensor)(void* out, void* self); HOST_API void* lantern__nested_tensor_strides_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_tensor_strides_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__nested_tensor_storage_offsets_out_tensor_tensor)(void* out, void* self); + HOST_API void* lantern__nested_tensor_storage_offsets_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_tensor_storage_offsets_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__nested_from_padded_and_nested_example_out_tensor_tensor_tensor)(void* out, void* padded, void* nt_example); HOST_API void* lantern__nested_from_padded_and_nested_example_out_tensor_tensor_tensor(void* out, void* padded, void* nt_example) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_from_padded_and_nested_example_out_tensor_tensor_tensor(out, padded, nt_example); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_intarrayref)(void* out, void* self, void* nested_size, void* nested_strides, void* offsets); - HOST_API void* lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_intarrayref(void* out, void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_intarrayref(out, self, nested_size, nested_strides, offsets); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_tensor)(void* out, void* self, void* nested_size, void* nested_strides, void* offsets); + HOST_API void* lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_tensor(void* out, void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_CHECK_LOADED 
void* ret = _lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_tensor(out, self, nested_size, nested_strides, offsets); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__nested_view_from_jagged_copy_out_tensor_tensor_tensor_tensor_tensor_intt_tensor_tensor)(void* out, void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen); + HOST_API void* lantern__nested_view_from_jagged_copy_out_tensor_tensor_tensor_tensor_tensor_intt_tensor_tensor(void* out, void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_view_from_jagged_copy_out_tensor_tensor_tensor_tensor_tensor_intt_tensor_tensor(out, self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__nested_get_values_copy_out_tensor_tensor)(void* out, void* self); + HOST_API void* lantern__nested_get_values_copy_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__nested_get_values_copy_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__trilinear_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt)(void* out, void* i1, void* i2, void* i3, void* expand1, void* expand2, void* expand3, void* sumdim, void* unroll_dim); HOST_API void* lantern__trilinear_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt(void* out, void* i1, void* i2, void* i3, void* expand1, void* expand2, void* expand3, void* sumdim, void* unroll_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__trilinear_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt(out, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__unique_out_tensor_tensor_tensor_bool_bool)(void* out0, void* out1, void* self, void* sorted, void* return_inverse); @@ -9044,8 +9318,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__unique2_out_tensor_tensor_tensor_tensor_bool_bool_bool(void* out0, void* out1, void* out2, void* self, void* sorted, void* return_inverse, void* return_counts) { LANTERN_CHECK_LOADED void* ret = _lantern__unique2_out_tensor_tensor_tensor_tensor_bool_bool_bool(out0, out1, out2, self, sorted, return_inverse, return_counts); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__unsafe_view_out_tensor_tensor_intarrayref)(void* out, void* self, void* size); HOST_API void* lantern__unsafe_view_out_tensor_tensor_intarrayref(void* out, void* self, void* size) { LANTERN_CHECK_LOADED void* ret = _lantern__unsafe_view_out_tensor_tensor_intarrayref(out, self, size); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_var_mean_out_tensor_tensor_tensor_intarrayref_intt_bool)(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim); - HOST_API void* lantern_var_mean_out_tensor_tensor_tensor_intarrayref_intt_bool(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_out_tensor_tensor_tensor_intarrayref_intt_bool(out0, out1, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR 
_lantern_var_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool)(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim); + HOST_API void* lantern_var_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim) { LANTERN_CHECK_LOADED void* ret = _lantern_var_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool(out0, out1, self, dim, correction, keepdim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__weight_norm_interface_out_tensor_tensor_tensor_tensor_intt)(void* out0, void* out1, void* v, void* g, void* dim); HOST_API void* lantern__weight_norm_interface_out_tensor_tensor_tensor_tensor_intt(void* out0, void* out1, void* v, void* g, void* dim) { LANTERN_CHECK_LOADED void* ret = _lantern__weight_norm_interface_out_tensor_tensor_tensor_tensor_intt(out0, out1, v, g, dim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__weight_norm_interface_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_intt)(void* out0, void* out1, void* grad_w, void* saved_v, void* saved_g, void* saved_norms, void* dim); @@ -9072,6 +9346,10 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_native_norm_out_tensor_tensor_scalar(void* out, void* self, void* p) { LANTERN_CHECK_LOADED void* ret = _lantern_native_norm_out_tensor_tensor_scalar(out, self, p); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_native_norm_out_tensor_tensor_scalar_intarrayref_bool_scalartype)(void* out, void* self, void* p, void* dim, void* keepdim, void* dtype); HOST_API void* lantern_native_norm_out_tensor_tensor_scalar_intarrayref_bool_scalartype(void* out, void* self, void* p, void* dim, void* keepdim, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_native_norm_out_tensor_tensor_scalar_intarrayref_bool_scalartype(out, self, p, dim, keepdim, dtype); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__batch_norm_with_update_functional_tensor_tensor_tensor_tensor_tensor_double_double)(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps); + HOST_API void* lantern__batch_norm_with_update_functional_tensor_tensor_tensor_tensor_tensor_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__batch_norm_with_update_functional_tensor_tensor_tensor_tensor_tensor_double_double(input, weight, bias, running_mean, running_var, momentum, eps); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__batch_norm_no_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double)(void* out0, void* out1, void* out2, void* out3, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps); + HOST_API void* lantern__batch_norm_no_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(void* out0, void* out1, void* out2, void* out3, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) { LANTERN_CHECK_LOADED void* ret = _lantern__batch_norm_no_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(out0, out1, out2, out3, input, weight, bias, running_mean, running_var, momentum, eps); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* 
(LANTERN_PTR _lantern__sparse_sum_out_tensor_tensor_intarrayref)(void* out, void* self, void* dim); HOST_API void* lantern__sparse_sum_out_tensor_tensor_intarrayref(void* out, void* self, void* dim) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_sum_out_tensor_tensor_intarrayref(out, self, dim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__sparse_sum_backward_out_tensor_tensor_tensor_intarrayref)(void* out, void* grad, void* self, void* dim); @@ -9120,8 +9398,8 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_sparse_coo_tensor_out_tensor_intarrayref(void* out, void* size) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_coo_tensor_out_tensor_intarrayref(out, size); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_with_dims_out_tensor_intt_intt_intarrayref)(void* out, void* sparse_dim, void* dense_dim, void* size); HOST_API void* lantern__sparse_coo_tensor_with_dims_out_tensor_intt_intt_intarrayref(void* out, void* sparse_dim, void* dense_dim, void* size) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_with_dims_out_tensor_intt_intt_intarrayref(out, sparse_dim, dense_dim, size); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor)(void* out, void* sparse_dim, void* dense_dim, void* size, void* indices, void* values); - HOST_API void* lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor(void* out, void* sparse_dim, void* dense_dim, void* size, void* indices, void* values) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor(out, sparse_dim, dense_dim, size, indices, values); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor_bool)(void* out, void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* is_coalesced); + HOST_API void* lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor_bool(void* out, void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* is_coalesced) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor_bool(out, sparse_dim, dense_dim, size, indices, values, is_coalesced); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_sparse_resize_out_tensor_tensor_intarrayref_intt_intt)(void* out, void* self, void* size, void* sparse_dim, void* dense_dim); HOST_API void* lantern_sparse_resize_out_tensor_tensor_intarrayref_intt_intt(void* out, void* self, void* size, void* sparse_dim, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_resize_out_tensor_tensor_intarrayref_intt_intt(out, self, size, sparse_dim, dense_dim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_sparse_resize_tensor_intarrayref_intt_intt)(void* self, void* size, void* sparse_dim, void* dense_dim); @@ -9132,8 +9410,10 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_sparse_resize_and_clear_tensor_intarrayref_intt_intt(void* self, void* size, void* sparse_dim, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = 
_lantern_sparse_resize_and_clear_tensor_intarrayref_intt_intt(self, size, sparse_dim, dense_dim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_sparse_mask_out_tensor_tensor_tensor)(void* out, void* self, void* mask); HOST_API void* lantern_sparse_mask_out_tensor_tensor_tensor(void* out, void* self, void* mask) { LANTERN_CHECK_LOADED void* ret = _lantern_sparse_mask_out_tensor_tensor_tensor(out, self, mask); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__to_dense_out_tensor_tensor_scalartype)(void* out, void* self, void* dtype); - HOST_API void* lantern__to_dense_out_tensor_tensor_scalartype(void* out, void* self, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__to_dense_out_tensor_tensor_scalartype(out, self, dtype); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__sparse_mask_projection_out_tensor_tensor_tensor_bool)(void* out, void* self, void* mask, void* accumulate_matches); + HOST_API void* lantern__sparse_mask_projection_out_tensor_tensor_tensor_bool(void* out, void* self, void* mask, void* accumulate_matches) { LANTERN_CHECK_LOADED void* ret = _lantern__sparse_mask_projection_out_tensor_tensor_tensor_bool(out, self, mask, accumulate_matches); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__to_dense_out_tensor_tensor_scalartype_bool)(void* out, void* self, void* dtype, void* masked_grad); + HOST_API void* lantern__to_dense_out_tensor_tensor_scalartype_bool(void* out, void* self, void* dtype, void* masked_grad) { LANTERN_CHECK_LOADED void* ret = _lantern__to_dense_out_tensor_tensor_scalartype_bool(out, self, dtype, masked_grad); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__coalesce_out_tensor_tensor)(void* out, void* self); HOST_API void* lantern__coalesce_out_tensor_tensor(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__coalesce_out_tensor_tensor(out, self); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__coalesced_out_tensor_tensor_bool)(void* out, void* self, void* coalesced); @@ -9144,24 +9424,24 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern_copy_sparse_to_sparse_out_tensor_tensor_tensor_bool(void* out, void* self, void* src, void* non_blocking) { LANTERN_CHECK_LOADED void* ret = _lantern_copy_sparse_to_sparse_out_tensor_tensor_tensor_bool(out, self, src, non_blocking); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_copy_sparse_to_sparse_tensor_tensor_bool)(void* self, void* src, void* non_blocking); HOST_API void* lantern_copy_sparse_to_sparse_tensor_tensor_bool(void* self, void* src, void* non_blocking) { LANTERN_CHECK_LOADED void* ret = _lantern_copy_sparse_to_sparse_tensor_tensor_bool(self, src, non_blocking); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_to_sparse_out_tensor_tensor_intt)(void* out, void* self, void* sparse_dim); - HOST_API void* lantern_to_sparse_out_tensor_tensor_intt(void* out, void* self, void* sparse_dim) { LANTERN_CHECK_LOADED void* ret = _lantern_to_sparse_out_tensor_tensor_intt(out, self, sparse_dim); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_to_sparse_out_tensor_tensor_layout_intarrayref_intt)(void* out, void* self, void* layout, void* blocksize, void* dense_dim); - HOST_API void* lantern_to_sparse_out_tensor_tensor_layout_intarrayref_intt(void* out, void* self, void* layout, void* blocksize, void* 
dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern_to_sparse_out_tensor_tensor_layout_intarrayref_intt(out, self, layout, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_to_sparse_csr_out_tensor_tensor_intt)(void* out, void* self, void* dense_dim); - HOST_API void* lantern_to_sparse_csr_out_tensor_tensor_intt(void* out, void* self, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern_to_sparse_csr_out_tensor_tensor_intt(out, self, dense_dim); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_to_sparse_csc_out_tensor_tensor_intt)(void* out, void* self, void* dense_dim); - HOST_API void* lantern_to_sparse_csc_out_tensor_tensor_intt(void* out, void* self, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern_to_sparse_csc_out_tensor_tensor_intt(out, self, dense_dim); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_to_sparse_bsr_out_tensor_tensor_intarrayref_intt)(void* out, void* self, void* blocksize, void* dense_dim); - HOST_API void* lantern_to_sparse_bsr_out_tensor_tensor_intarrayref_intt(void* out, void* self, void* blocksize, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern_to_sparse_bsr_out_tensor_tensor_intarrayref_intt(out, self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_to_sparse_bsc_out_tensor_tensor_intarrayref_intt)(void* out, void* self, void* blocksize, void* dense_dim); - HOST_API void* lantern_to_sparse_bsc_out_tensor_tensor_intarrayref_intt(void* out, void* self, void* blocksize, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern_to_sparse_bsc_out_tensor_tensor_intarrayref_intt(out, self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__to_sparse_out_tensor_tensor_intt)(void* out, void* self, void* sparse_dim); + HOST_API void* lantern__to_sparse_out_tensor_tensor_intt(void* out, void* self, void* sparse_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__to_sparse_out_tensor_tensor_intt(out, self, sparse_dim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__to_sparse_out_tensor_tensor_layout_intarrayref_intt)(void* out, void* self, void* layout, void* blocksize, void* dense_dim); + HOST_API void* lantern__to_sparse_out_tensor_tensor_layout_intarrayref_intt(void* out, void* self, void* layout, void* blocksize, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__to_sparse_out_tensor_tensor_layout_intarrayref_intt(out, self, layout, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__to_sparse_csr_out_tensor_tensor_intt)(void* out, void* self, void* dense_dim); + HOST_API void* lantern__to_sparse_csr_out_tensor_tensor_intt(void* out, void* self, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__to_sparse_csr_out_tensor_tensor_intt(out, self, dense_dim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__to_sparse_csc_out_tensor_tensor_intt)(void* out, void* self, void* dense_dim); + HOST_API void* lantern__to_sparse_csc_out_tensor_tensor_intt(void* out, void* self, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__to_sparse_csc_out_tensor_tensor_intt(out, self, dense_dim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__to_sparse_bsr_out_tensor_tensor_intarrayref_intt)(void* out, void* self, void* blocksize, void* dense_dim); + HOST_API void* 
lantern__to_sparse_bsr_out_tensor_tensor_intarrayref_intt(void* out, void* self, void* blocksize, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__to_sparse_bsr_out_tensor_tensor_intarrayref_intt(out, self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__to_sparse_bsc_out_tensor_tensor_intarrayref_intt)(void* out, void* self, void* blocksize, void* dense_dim); + HOST_API void* lantern__to_sparse_bsc_out_tensor_tensor_intarrayref_intt(void* out, void* self, void* blocksize, void* dense_dim) { LANTERN_CHECK_LOADED void* ret = _lantern__to_sparse_bsc_out_tensor_tensor_intarrayref_intt(out, self, blocksize, dense_dim); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_to_mkldnn_out_tensor_tensor_scalartype)(void* out, void* self, void* dtype); HOST_API void* lantern_to_mkldnn_out_tensor_tensor_scalartype(void* out, void* self, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern_to_mkldnn_out_tensor_tensor_scalartype(out, self, dtype); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_mkldnn_reorder_conv2d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref)(void* out, void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size); HOST_API void* lantern_mkldnn_reorder_conv2d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(void* out, void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_reorder_conv2d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(out, self, padding, stride, dilation, groups, input_size); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt)(void* out, void* self, void* padding, void* stride, void* dilation, void* groups); - HOST_API void* lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt(void* out, void* self, void* padding, void* stride, void* dilation, void* groups) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt(out, self, padding, stride, dilation, groups); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref)(void* out, void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size); + HOST_API void* lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(void* out, void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size) { LANTERN_CHECK_LOADED void* ret = _lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(out, self, padding, stride, dilation, groups, input_size); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_quantize_per_tensor_dynamic_out_tensor_tensor_scalartype_bool)(void* out, void* self, void* dtype, void* reduce_range); HOST_API void* lantern_quantize_per_tensor_dynamic_out_tensor_tensor_scalartype_bool(void* out, void* self, void* dtype, void* reduce_range) { LANTERN_CHECK_LOADED void* ret = _lantern_quantize_per_tensor_dynamic_out_tensor_tensor_scalartype_bool(out, self, dtype, reduce_range); 
LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_quantize_per_tensor_out_tensor_tensor_double_intt_scalartype)(void* out, void* self, void* scale, void* zero_point, void* dtype); @@ -9320,8 +9600,6 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__histogramdd_from_bin_tensors_out_tensor_tensor_tensorlist_tensor_bool(void* out, void* self, void* bins, void* weight, void* density) { LANTERN_CHECK_LOADED void* ret = _lantern__histogramdd_from_bin_tensors_out_tensor_tensor_tensorlist_tensor_bool(out, self, bins, weight, density); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_remainder_out_tensor_scalar_tensor)(void* out, void* self, void* other); HOST_API void* lantern_remainder_out_tensor_scalar_tensor(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern_remainder_out_tensor_scalar_tensor(out, self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern_argsort_out_tensor_tensor_bool_intt_bool)(void* out, void* self, void* stable, void* dim, void* descending); - HOST_API void* lantern_argsort_out_tensor_tensor_bool_intt_bool(void* out, void* self, void* stable, void* dim, void* descending) { LANTERN_CHECK_LOADED void* ret = _lantern_argsort_out_tensor_tensor_bool_intt_bool(out, self, stable, dim, descending); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_unfold_backward_out_tensor_tensor_intarrayref_intt_intt_intt)(void* out, void* grad_in, void* input_sizes, void* dim, void* size, void* step); HOST_API void* lantern_unfold_backward_out_tensor_tensor_intarrayref_intt_intt_intt(void* out, void* grad_in, void* input_sizes, void* dim, void* size, void* step) { LANTERN_CHECK_LOADED void* ret = _lantern_unfold_backward_out_tensor_tensor_intarrayref_intt_intt_intt(out, grad_in, input_sizes, dim, size, step); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern_normal_out_tensor_tensor_double_double_generator)(void* out, void* self, void* mean, void* std, void* generator); @@ -9336,60 +9614,70 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n) HOST_API void* lantern__amp_update_scale_tensor_tensor_tensor_double_double_intt(void* self, void* growth_tracker, void* found_inf, void* scale_growth_factor, void* scale_backoff_factor, void* growth_interval) { LANTERN_CHECK_LOADED void* ret = _lantern__amp_update_scale_tensor_tensor_tensor_double_double_intt(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar); HOST_API void* lantern__foreach_add_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar); - HOST_API void* lantern__foreach_sub_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar); - HOST_API void* 
lantern__foreach_mul_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar); - HOST_API void* lantern__foreach_div_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar); - HOST_API void* lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar); - HOST_API void* lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar); - HOST_API void* lantern__foreach_maximum_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar); - HOST_API void* lantern__foreach_minimum_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_out_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* other, void* alpha); HOST_API void* lantern__foreach_add_out_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_out_tensorlist_tensorlist_tensorlist_scalar(out, self, other, alpha); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars); + HOST_API void* lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_out_tensorlist_tensorlist_tensor_scalar)(void* out, void* self, void* other, void* alpha); + HOST_API void* lantern__foreach_add_out_tensorlist_tensorlist_tensor_scalar(void* out, void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_out_tensorlist_tensorlist_tensor_scalar(out, self, other, alpha); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR 
_lantern__foreach_sub_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar); + HOST_API void* lantern__foreach_sub_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_out_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* other, void* alpha); HOST_API void* lantern__foreach_sub_out_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* other, void* alpha) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_out_tensorlist_tensorlist_tensorlist_scalar(out, self, other, alpha); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars); + HOST_API void* lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar); + HOST_API void* lantern__foreach_mul_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other); HOST_API void* lantern__foreach_mul_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars); + HOST_API void* lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_out_tensorlist_tensorlist_tensor)(void* out, void* self, void* other); + HOST_API void* lantern__foreach_mul_out_tensorlist_tensorlist_tensor(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_out_tensorlist_tensorlist_tensor(out, self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar); + HOST_API void* lantern__foreach_div_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other); HOST_API void* lantern__foreach_div_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR 
_lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other); - HOST_API void* lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other); - HOST_API void* lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other); - HOST_API void* lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other); - HOST_API void* lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars); - HOST_API void* lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars); - HOST_API void* lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars); HOST_API void* lantern__foreach_div_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars); - HOST_API void* lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; } - LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars); - HOST_API void* lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = 
_lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_div_out_tensorlist_tensorlist_tensor)(void* out, void* self, void* other); + HOST_API void* lantern__foreach_div_out_tensorlist_tensorlist_tensor(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_div_out_tensorlist_tensorlist_tensor(out, self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar); + HOST_API void* lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other); + HOST_API void* lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; } LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_max_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars); HOST_API void* lantern__foreach_clamp_max_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_max_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar); + HOST_API void* lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other); + HOST_API void* lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars); + HOST_API void* lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar); + HOST_API void* lantern__foreach_maximum_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; } + LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other); + HOST_API void* 
LANTERN_API void* (LANTERN_PTR _lantern__foreach_maximum_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars);
HOST_API void* lantern__foreach_maximum_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_maximum_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* scalar);
+ HOST_API void* lantern__foreach_minimum_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_out_tensorlist_tensorlist_scalar(out, self, scalar); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* other);
+ HOST_API void* lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist(out, self, other); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_minimum_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* scalars);
HOST_API void* lantern__foreach_minimum_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_minimum_out_tensorlist_tensorlist_arrayrefscalar(out, self, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_exp_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_exp_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_exp_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_zero_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_zero_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_zero_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_zero_tensorlist)(void* self);
- HOST_API void* lantern__foreach_zero_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_zero_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_sqrt_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_sqrt_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sqrt_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* tensor1, void* tensor2, void* value);
+ HOST_API void* lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(out, self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
+ HOST_API void* lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
+ HOST_API void* lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* tensor1, void* tensor2, void* value);
+ HOST_API void* lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(out, self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
+ HOST_API void* lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
+ HOST_API void* lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_abs_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_abs_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_abs_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_acos_out_tensorlist_tensorlist)(void* out, void* self);
@@ -9408,10 +9696,20 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern__foreach_erf_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_erf_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_erfc_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_erfc_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_erfc_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_exp_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_exp_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_exp_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_expm1_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_expm1_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_expm1_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_floor_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_floor_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_floor_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_frac_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_frac_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_frac_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* tensors1, void* weights);
+ HOST_API void* lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist(void* out, void* self, void* tensors1, void* weights) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist(out, self, tensors1, weights); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* tensors1, void* weight);
+ HOST_API void* lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensors1, void* weight) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar(out, self, tensors1, weight); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_lgamma_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_lgamma_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lgamma_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_log_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_log_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_log_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_log10_out_tensorlist_tensorlist)(void* out, void* self);
@@ -9420,50 +9718,46 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern__foreach_log1p_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_log1p_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_log2_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_log2_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_log2_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_max_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_max_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_max_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_neg_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_neg_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_neg_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_tan_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_tan_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tan_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_tanh_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_tanh_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tanh_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_sin_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_sin_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sin_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_sinh_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_sinh_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sinh_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_round_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_round_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_round_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_lgamma_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_lgamma_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lgamma_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_frac_out_tensorlist_tensorlist)(void* out, void* self);
- HOST_API void* lantern__foreach_frac_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_frac_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_norm_out_tensorlist_tensorlist_scalar_scalartype)(void* out, void* self, void* ord, void* dtype);
+ HOST_API void* lantern__foreach_norm_out_tensorlist_tensorlist_scalar_scalartype(void* out, void* self, void* ord, void* dtype) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_norm_out_tensorlist_tensorlist_scalar_scalartype(out, self, ord, dtype); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow_out_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* exponent);
+ HOST_API void* lantern__foreach_pow_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow_out_tensorlist_tensorlist_tensorlist(out, self, exponent); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* exponent);
+ HOST_API void* lantern__foreach_pow_out_tensorlist_tensorlist_scalar(void* out, void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow_out_tensorlist_tensorlist_scalar(out, self, exponent); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_pow_out_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* exponent);
+ HOST_API void* lantern__foreach_pow_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* exponent) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_pow_out_tensorlist_tensorlist_arrayrefscalar(out, self, exponent); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_reciprocal_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_reciprocal_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_reciprocal_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_round_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_round_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_round_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_sigmoid_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_sigmoid_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sigmoid_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sign_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_sign_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sign_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sin_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_sin_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sin_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sinh_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_sinh_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sinh_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_sqrt_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_sqrt_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_sqrt_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_tan_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_tan_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tan_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_tanh_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_tanh_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_tanh_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foreach_trunc_out_tensorlist_tensorlist)(void* out, void* self);
HOST_API void* lantern__foreach_trunc_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_trunc_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* tensor1, void* tensor2, void* value);
- HOST_API void* lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(out, self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* tensor1, void* tensor2, void* value);
- HOST_API void* lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensor1, void* tensor2, void* value) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(out, self, tensor1, tensor2, value); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor)(void* out, void* self, void* tensor1, void* tensor2, void* scalars);
- HOST_API void* lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(out, self, tensor1, tensor2, scalars); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_norm_out_tensorlist_tensorlist_scalar)(void* out, void* self, void* ord);
- HOST_API void* lantern__foreach_norm_out_tensorlist_tensorlist_scalar(void* out, void* self, void* ord) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_norm_out_tensorlist_tensorlist_scalar(out, self, ord); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist)(void* out, void* self, void* tensors1, void* weights);
- HOST_API void* lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist(void* out, void* self, void* tensors1, void* weights) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist(out, self, tensors1, weights); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar)(void* out, void* self, void* tensors1, void* weight);
- HOST_API void* lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensors1, void* weight) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar(out, self, tensors1, weight); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_zero_out_tensorlist_tensorlist)(void* out, void* self);
+ HOST_API void* lantern__foreach_zero_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_zero_out_tensorlist_tensorlist(out, self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_zero_tensorlist)(void* self);
+ HOST_API void* lantern__foreach_zero_tensorlist(void* self) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_zero_tensorlist(self); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__foreach_copy_out_tensorlist_tensorlist_tensorlist_bool)(void* out, void* self, void* src, void* non_blocking);
+ HOST_API void* lantern__foreach_copy_out_tensorlist_tensorlist_tensorlist_bool(void* out, void* self, void* src, void* non_blocking) { LANTERN_CHECK_LOADED void* ret = _lantern__foreach_copy_out_tensorlist_tensorlist_tensorlist_bool(out, self, src, non_blocking); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_bucketize_out_tensor_scalar_tensor_bool_bool)(void* out, void* self, void* boundaries, void* out_int32, void* right);
HOST_API void* lantern_bucketize_out_tensor_scalar_tensor_bool_bool(void* out, void* self, void* boundaries, void* out_int32, void* right) { LANTERN_CHECK_LOADED void* ret = _lantern_bucketize_out_tensor_scalar_tensor_bool_bool(out, self, boundaries, out_int32, right); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor)(void* out, void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter);
- HOST_API void* lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor(void* out, void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter) { LANTERN_CHECK_LOADED void* ret = _lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor(out, sorted_sequence, self, out_int32, right, side, sorter); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_glu_jvp_out_tensor_tensor_tensor_tensor_intt)(void* out, void* glu, void* x, void* dx, void* dim);
HOST_API void* lantern_glu_jvp_out_tensor_tensor_tensor_tensor_intt(void* out, void* glu, void* x, void* dx, void* dim) { LANTERN_CHECK_LOADED void* ret = _lantern_glu_jvp_out_tensor_tensor_tensor_tensor_intt(out, glu, x, dx, dim); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern_glu_backward_jvp_out_tensor_tensor_tensor_tensor_tensor_tensor_intt)(void* out, void* grad_x, void* grad_glu, void* x, void* dgrad_glu, void* dx, void* dim);
@@ -9588,20 +9882,36 @@ HOST_API void lantern_buffer_from_tensor (void* tensor, void* buffer, int n)
HOST_API void* lantern__triton_scaled_dot_attention_out_tensor_tensor_tensor_tensor_double(void* out, void* q, void* k, void* v, void* dropout_p) { LANTERN_CHECK_LOADED void* ret = _lantern__triton_scaled_dot_attention_out_tensor_tensor_tensor_tensor_double(out, q, k, v, dropout_p); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__triton_multi_head_attention_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor)(void* out, void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask);
HOST_API void* lantern__triton_multi_head_attention_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor(void* out, void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask) { LANTERN_CHECK_LOADED void* ret = _lantern__triton_multi_head_attention_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor(out, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__transformer_decoder_only_layer_fwd_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor)(void* out0, void* out1, void* out2, void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* incr_key, void* incr_value);
- HOST_API void* lantern__transformer_decoder_only_layer_fwd_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* out0, void* out1, void* out2, void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* incr_key, void* incr_value) { LANTERN_CHECK_LOADED void* ret = _lantern__transformer_decoder_only_layer_fwd_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(out0, out1, out2, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value); LANTERN_HOST_HANDLER return ret; }
- LANTERN_API void* (LANTERN_PTR _lantern__native_decoder_only_multi_head_attention_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool)(void* out0, void* out1, void* out2, void* out3, void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask, void* incr_key, void* incr_value, void* need_weights, void* average_attn_weights);
- HOST_API void* lantern__native_decoder_only_multi_head_attention_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(void* out0, void* out1, void* out2, void* out3, void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask, void* incr_key, void* incr_value, void* need_weights, void* average_attn_weights) { LANTERN_CHECK_LOADED void* ret = _lantern__native_decoder_only_multi_head_attention_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(out0, out1, out2, out3, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__foobar_out_tensor_tensor_bool_bool_bool)(void* out, void* self, void* arg1, void* arg2, void* arg3);
HOST_API void* lantern__foobar_out_tensor_tensor_bool_bool_bool(void* out, void* self, void* arg1, void* arg2, void* arg3) { LANTERN_CHECK_LOADED void* ret = _lantern__foobar_out_tensor_tensor_bool_bool_bool(out, self, arg1, arg2, arg3); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor)(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
HOST_API void* lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
HOST_API void* lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor)(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor)(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
HOST_API void* lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
LANTERN_API void* (LANTERN_PTR _lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
HOST_API void* lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor)(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(out, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor)(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor)(void* out, void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(out, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor)(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor)(void* out, void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(out, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor)(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_adagrad_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor)(void* out, void* self, void* grads, void* state_sums, void* state_steps, void* lr, void* lr_decay, void* weight_decay, void* eps, void* maximize, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_adagrad_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(void* out, void* self, void* grads, void* state_sums, void* state_steps, void* lr, void* lr_decay, void* weight_decay, void* eps, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adagrad_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(out, self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
+ LANTERN_API void* (LANTERN_PTR _lantern__fused_adagrad_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor)(void* self, void* grads, void* state_sums, void* state_steps, void* lr, void* lr_decay, void* weight_decay, void* eps, void* maximize, void* grad_scale, void* found_inf);
+ HOST_API void* lantern__fused_adagrad_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(void* self, void* grads, void* state_sums, void* state_steps, void* lr, void* lr_decay, void* weight_decay, void* eps, void* maximize, void* grad_scale, void* found_inf) { LANTERN_CHECK_LOADED void* ret = _lantern__fused_adagrad_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf); LANTERN_HOST_HANDLER return ret; }
/* Autogen Headers -- End */
#ifdef __cplusplus
@@ -9711,8 +10021,12 @@ bool lanternLoadLibrary(const std::string &libPath, std::string *pError)
}
pLibrary = (void *)::LoadLibraryEx(libFile.c_str(), NULL, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS);
+#elif defined(__APPLE__) || defined(__MACH__)
+ pLibrary = ::dlopen(libFile.c_str(), RTLD_NOW);
#else
- pLibrary = ::dlopen(libFile.c_str(), RTLD_NOW | RTLD_GLOBAL);
+ // On Linux, use RTLD_DEEPBIND to avoid symbol conflicts and to make sure that
+ // libtorch calls into the bundled MKL BLAS.
+ pLibrary = ::dlopen(libFile.c_str(), RTLD_NOW | RTLD_DEEPBIND);
#endif
if (pLibrary == NULL) {
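A note on the RTLD_DEEPBIND change above: RTLD_DEEPBIND is a glibc extension to dlopen() that places the loaded library's own symbol lookup scope ahead of the process-global scope, so references made from inside liblantern/libtorch (for example, BLAS calls) bind to the copies shipped alongside libtorch rather than to whatever BLAS the host R process has already loaded. The following minimal, self-contained C sketch illustrates the same loading strategy; it is not taken from this patch, and the library name "liblantern.so" is only a placeholder:

#define _GNU_SOURCE /* expose RTLD_DEEPBIND in glibc's <dlfcn.h> */
#include <dlfcn.h>
#include <stdio.h>

int main(void) {
  int flags = RTLD_NOW;
#ifdef RTLD_DEEPBIND
  /* glibc only: resolve the library's symbols against itself first. */
  flags |= RTLD_DEEPBIND;
#endif
  void *handle = dlopen("liblantern.so", flags); /* placeholder path */
  if (handle == NULL) {
    fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }
  dlclose(handle);
  return 0;
}

The #ifdef guard keeps the sketch portable; macOS, for instance, does not define RTLD_DEEPBIND, which is why the patch adds the flag only on the Linux branch.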
@@ -10261,7 +10575,17 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_Tensor_align_as_tensor_tensor)
LOAD_SYMBOL(_lantern_align_tensors_tensorlist)
LOAD_SYMBOL(_lantern__assert_async_tensor)
+ LOAD_SYMBOL(_lantern__assert_async_tensor_cstringview)
+ LOAD_SYMBOL(_lantern__assert_scalar_scalar_cstringview)
+ LOAD_SYMBOL(_lantern__functional_assert_scalar_scalar_cstringview_tensor)
+ LOAD_SYMBOL(_lantern__functional_assert_async_tensor_cstringview_tensor)
LOAD_SYMBOL(_lantern__assert_tensor_metadata_tensor_intarrayref_intarrayref_scalartype)
+ LOAD_SYMBOL(_lantern__print_cstringview)
+ LOAD_SYMBOL(_lantern_sym_constrain_range_scalar_intt_intt)
+ LOAD_SYMBOL(_lantern_sym_constrain_range_for_size_scalar_intt_intt)
+ LOAD_SYMBOL(_lantern__functional_sym_constrain_range_scalar_intt_intt_tensor)
+ LOAD_SYMBOL(_lantern__functional_sym_constrain_range_for_size_scalar_intt_intt_tensor)
+ LOAD_SYMBOL(_lantern__make_dep_token_tensoroptions_memoryformat)
LOAD_SYMBOL(_lantern_Tensor_refine_names_tensor_dimnamelist)
LOAD_SYMBOL(_lantern__use_cudnn_ctc_loss_tensor_tensor_intarrayref_intarrayref_intt)
LOAD_SYMBOL(_lantern__use_cudnn_ctc_loss_tensor_tensor_tensor_tensor_intt)
@@ -10370,9 +10694,13 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern__is_any_true_tensor)
LOAD_SYMBOL(_lantern_Tensor__is_any_true_tensor)
LOAD_SYMBOL(_lantern__test_check_tensor_tensor)
+ LOAD_SYMBOL(_lantern__test_functorch_fallback_tensor_tensor)
LOAD_SYMBOL(_lantern_all_tensor_intt_bool)
LOAD_SYMBOL(_lantern_Tensor_all_tensor_intt_bool)
+ LOAD_SYMBOL(_lantern_all_tensor_intarrayref_bool)
+ LOAD_SYMBOL(_lantern_Tensor_all_tensor_intarrayref_bool)
LOAD_SYMBOL(_lantern_all_out_tensor_tensor_intt_bool)
+ LOAD_SYMBOL(_lantern_all_out_tensor_tensor_intarrayref_bool)
LOAD_SYMBOL(_lantern_all_tensor_dimname_bool)
LOAD_SYMBOL(_lantern_Tensor_all_tensor_dimname_bool)
LOAD_SYMBOL(_lantern_all_out_tensor_tensor_dimname_bool)
@@ -10380,7 +10708,10 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_Tensor_allclose_tensor_tensor_double_double_bool)
LOAD_SYMBOL(_lantern_any_tensor_intt_bool)
LOAD_SYMBOL(_lantern_Tensor_any_tensor_intt_bool)
+ LOAD_SYMBOL(_lantern_any_tensor_intarrayref_bool)
+ LOAD_SYMBOL(_lantern_Tensor_any_tensor_intarrayref_bool)
LOAD_SYMBOL(_lantern_any_out_tensor_tensor_intt_bool)
+ LOAD_SYMBOL(_lantern_any_out_tensor_tensor_intarrayref_bool)
LOAD_SYMBOL(_lantern_any_tensor_dimname_bool)
LOAD_SYMBOL(_lantern_Tensor_any_tensor_dimname_bool)
LOAD_SYMBOL(_lantern_any_out_tensor_tensor_dimname_bool)
@@ -10493,6 +10824,8 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_Tensor_copysign_tensor_scalar)
LOAD_SYMBOL(_lantern_Tensor_copysign__tensor_scalar)
LOAD_SYMBOL(_lantern_copysign_out_tensor_tensor_scalar)
+ LOAD_SYMBOL(_lantern__lazy_clone_tensor)
+ LOAD_SYMBOL(_lantern_Tensor__lazy_clone_tensor)
LOAD_SYMBOL(_lantern_logical_not_tensor)
LOAD_SYMBOL(_lantern_Tensor_logical_not_tensor)
LOAD_SYMBOL(_lantern_Tensor_logical_not__tensor)
@@ -10642,6 +10975,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_cudnn_batch_norm_tensor_tensor_tensor_tensor_tensor_bool_double_double)
LOAD_SYMBOL(_lantern_cudnn_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_tensor)
LOAD_SYMBOL(_lantern_cudnn_convolution_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)
+ LOAD_SYMBOL(_lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)
LOAD_SYMBOL(_lantern_cudnn_convolution_transpose_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool)
LOAD_SYMBOL(_lantern__mps_convolution_transpose_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt)
LOAD_SYMBOL(_lantern_mps_convolution_transpose_backward_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_stdarraybool)
@@ -10770,6 +11104,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern__embedding_bag_per_sample_weights_backward_tensor_tensor_tensor_tensor_tensor_intt_intt)
LOAD_SYMBOL(_lantern_empty_intarrayref_dimnamelist_tensoroptions_memoryformat)
LOAD_SYMBOL(_lantern_empty_intarrayref_tensoroptions_memoryformat)
+ LOAD_SYMBOL(_lantern_empty_permuted_intarrayref_intarrayref_tensoroptions)
LOAD_SYMBOL(_lantern_Tensor_new_empty_tensor_intarrayref_tensoroptions)
LOAD_SYMBOL(_lantern_Tensor_new_empty_strided_tensor_intarrayref_intarrayref_tensoroptions)
LOAD_SYMBOL(_lantern_Tensor_new_full_tensor_intarrayref_scalar_tensoroptions)
@@ -10891,13 +11226,16 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern__fft_c2c_tensor_intarrayref_intt_bool)
LOAD_SYMBOL(_lantern__fft_c2c_out_tensor_tensor_intarrayref_intt_bool)
LOAD_SYMBOL(_lantern__validate_compressed_sparse_indices_bool_tensor_tensor_intt_intt_intt)
- LOAD_SYMBOL(_lantern__cufft_get_plan_cache_size_intt)
- LOAD_SYMBOL(_lantern__cufft_get_plan_cache_max_size_intt)
- LOAD_SYMBOL(_lantern__cufft_set_plan_cache_max_size_intt_intt)
- LOAD_SYMBOL(_lantern__cufft_clear_plan_cache_intt)
- LOAD_SYMBOL(_lantern_index_tensor_constclistcoptionaltensor)
- LOAD_SYMBOL(_lantern_Tensor_index_tensor_constclistcoptionaltensor)
- LOAD_SYMBOL(_lantern_index_out_tensor_tensor_constclistcoptionaltensor)
+ LOAD_SYMBOL(_lantern__cufft_get_plan_cache_size_deviceindex)
+ LOAD_SYMBOL(_lantern__cufft_get_plan_cache_max_size_deviceindex)
+ LOAD_SYMBOL(_lantern__cufft_set_plan_cache_max_size_deviceindex_intt)
+ LOAD_SYMBOL(_lantern__cufft_clear_plan_cache_deviceindex)
+ LOAD_SYMBOL(_lantern_index_tensor_constcliststdoptionaltensor)
+ LOAD_SYMBOL(_lantern_Tensor_index_tensor_constcliststdoptionaltensor)
+ LOAD_SYMBOL(_lantern_index_out_tensor_tensor_constcliststdoptionaltensor)
+ LOAD_SYMBOL(_lantern__unsafe_index_tensor_constcliststdoptionaltensor)
+ LOAD_SYMBOL(_lantern__unsafe_masked_index_tensor_tensor_constcliststdoptionaltensor_scalar)
+ LOAD_SYMBOL(_lantern__unsafe_masked_index_put_accumulate_tensor_tensor_constcliststdoptionaltensor_tensor)
LOAD_SYMBOL(_lantern_index_copy_out_tensor_tensor_intt_tensor_tensor)
LOAD_SYMBOL(_lantern_Tensor_index_copy__tensor_intt_tensor_tensor)
LOAD_SYMBOL(_lantern_index_copy_tensor_intt_tensor_tensor)
@@ -10905,11 +11243,12 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_Tensor_index_copy__tensor_dimname_tensor_tensor)
LOAD_SYMBOL(_lantern_index_copy_tensor_dimname_tensor_tensor)
LOAD_SYMBOL(_lantern_Tensor_index_copy_tensor_dimname_tensor_tensor)
- LOAD_SYMBOL(_lantern_index_put__tensor_constclistcoptionaltensor_tensor_bool)
- LOAD_SYMBOL(_lantern_Tensor_index_put__tensor_constclistcoptionaltensor_tensor_bool)
- LOAD_SYMBOL(_lantern_index_put_tensor_constclistcoptionaltensor_tensor_bool)
- LOAD_SYMBOL(_lantern_Tensor_index_put_tensor_constclistcoptionaltensor_tensor_bool)
- LOAD_SYMBOL(_lantern__index_put_impl__tensor_constclistcoptionaltensor_tensor_bool_bool)
+ LOAD_SYMBOL(_lantern_index_put__tensor_constcliststdoptionaltensor_tensor_bool)
+ LOAD_SYMBOL(_lantern_Tensor_index_put__tensor_constcliststdoptionaltensor_tensor_bool)
+ LOAD_SYMBOL(_lantern_index_put_tensor_constcliststdoptionaltensor_tensor_bool)
+ LOAD_SYMBOL(_lantern_Tensor_index_put_tensor_constcliststdoptionaltensor_tensor_bool)
+ LOAD_SYMBOL(_lantern__unsafe_index_put_tensor_constcliststdoptionaltensor_tensor_bool)
+ LOAD_SYMBOL(_lantern__index_put_impl__tensor_constcliststdoptionaltensor_tensor_bool_bool)
LOAD_SYMBOL(_lantern_instance_norm_tensor_tensor_tensor_tensor_tensor_bool_double_double_bool)
LOAD_SYMBOL(_lantern_isclose_tensor_tensor_double_double_bool)
LOAD_SYMBOL(_lantern_Tensor_isclose_tensor_tensor_double_double_bool)
@@ -10956,6 +11295,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_layer_norm_tensor_intarrayref_tensor_tensor_double_bool)
LOAD_SYMBOL(_lantern_native_layer_norm_tensor_intarrayref_tensor_tensor_double)
LOAD_SYMBOL(_lantern_native_layer_norm_backward_tensor_tensor_intarrayref_tensor_tensor_tensor_tensor_stdarraybool)
+ LOAD_SYMBOL(_lantern_rms_norm_tensor_intarrayref_tensor_double)
LOAD_SYMBOL(_lantern_nan_to_num_tensor_double_double_double)
LOAD_SYMBOL(_lantern_Tensor_nan_to_num_tensor_double_double_double)
LOAD_SYMBOL(_lantern_nan_to_num__tensor_double_double_double)
@@ -10968,10 +11308,22 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_mkldnn_linear_backward_input_intarrayref_tensor_tensor)
LOAD_SYMBOL(_lantern_mkldnn_linear_backward_weights_tensor_tensor_tensor_bool)
LOAD_SYMBOL(_lantern_mkldnn_linear_backward_tensor_tensor_tensor_stdarraybool)
+ LOAD_SYMBOL(_lantern__cslt_compress_tensor)
+ LOAD_SYMBOL(_lantern__cslt_sparse_mm_tensor_tensor_tensor_tensor_scalartype_bool_intt)
+ LOAD_SYMBOL(_lantern__cslt_sparse_mm_search_tensor_tensor_tensor_tensor_scalartype_bool)
+ LOAD_SYMBOL(_lantern__sparse_semi_structured_tile_tensor_cstringview_bool)
+ LOAD_SYMBOL(_lantern__sparse_semi_structured_apply_tensor_tensor)
+ LOAD_SYMBOL(_lantern__sparse_semi_structured_apply_dense_tensor_tensor)
+ LOAD_SYMBOL(_lantern__sparse_semi_structured_linear_tensor_tensor_tensor_tensor_cstringview_scalartype)
+ LOAD_SYMBOL(_lantern__sparse_semi_structured_mm_tensor_tensor_tensor_scalartype)
+ LOAD_SYMBOL(_lantern__sparse_semi_structured_addmm_tensor_tensor_tensor_tensor_scalar_scalar_scalartype)
+ LOAD_SYMBOL(_lantern__mixed_dtypes_linear_tensor_tensor_tensor_tensor_cstringview)
LOAD_SYMBOL(_lantern_fbgemm_linear_int8_weight_fp32_activation_tensor_tensor_tensor_tensor_scalar_scalar_tensor)
LOAD_SYMBOL(_lantern_fbgemm_linear_int8_weight_tensor_tensor_tensor_tensor_scalar_scalar_tensor)
LOAD_SYMBOL(_lantern_fbgemm_linear_quantize_weight_tensor)
LOAD_SYMBOL(_lantern_fbgemm_pack_gemm_matrix_fp16_tensor)
+ LOAD_SYMBOL(_lantern__wrapped_linear_prepack_tensor_tensor_tensor_tensor)
+ LOAD_SYMBOL(_lantern__wrapped_quantized_linear_prepacked_tensor_tensor_tensor_tensor_tensor_tensor_intt)
LOAD_SYMBOL(_lantern_fbgemm_linear_fp16_weight_fp32_activation_tensor_tensor_tensor)
LOAD_SYMBOL(_lantern_fbgemm_linear_fp16_weight_tensor_tensor_tensor)
LOAD_SYMBOL(_lantern_fbgemm_pack_quantized_matrix_tensor)
@@ -10982,7 +11334,13 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_Tensor_ldexp__tensor_tensor)
LOAD_SYMBOL(_lantern_ldexp_out_tensor_tensor_tensor)
LOAD_SYMBOL(_lantern_linspace_scalar_scalar_intt_tensoroptions)
+ LOAD_SYMBOL(_lantern_linspace_tensor_tensor_intt_tensoroptions)
+ LOAD_SYMBOL(_lantern_linspace_tensor_scalar_intt_tensoroptions)
+ LOAD_SYMBOL(_lantern_linspace_scalar_tensor_intt_tensoroptions)
LOAD_SYMBOL(_lantern_linspace_out_tensor_scalar_scalar_intt)
+ LOAD_SYMBOL(_lantern_linspace_out_tensor_tensor_tensor_intt)
+ LOAD_SYMBOL(_lantern_linspace_out_tensor_tensor_scalar_intt)
+ LOAD_SYMBOL(_lantern_linspace_out_tensor_scalar_tensor_intt)
LOAD_SYMBOL(_lantern_log_tensor)
LOAD_SYMBOL(_lantern_Tensor_log_tensor)
LOAD_SYMBOL(_lantern_log__tensor)
@@ -11022,7 +11380,13 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_xlogy_out_tensor_scalar_tensor)
LOAD_SYMBOL(_lantern_xlogy_out_tensor_tensor_scalar)
LOAD_SYMBOL(_lantern_logspace_scalar_scalar_intt_double_tensoroptions)
+ LOAD_SYMBOL(_lantern_logspace_tensor_tensor_intt_double_tensoroptions)
+ LOAD_SYMBOL(_lantern_logspace_tensor_scalar_intt_double_tensoroptions)
+ LOAD_SYMBOL(_lantern_logspace_scalar_tensor_intt_double_tensoroptions)
LOAD_SYMBOL(_lantern_logspace_out_tensor_scalar_scalar_intt_double)
+ LOAD_SYMBOL(_lantern_logspace_out_tensor_tensor_tensor_intt_double)
+ LOAD_SYMBOL(_lantern_logspace_out_tensor_tensor_scalar_intt_double)
+ LOAD_SYMBOL(_lantern_logspace_out_tensor_scalar_tensor_intt_double)
LOAD_SYMBOL(_lantern_log_softmax_tensor_intt_scalartype)
LOAD_SYMBOL(_lantern_Tensor_log_softmax_tensor_intt_scalartype)
LOAD_SYMBOL(_lantern_log_softmax_out_tensor_tensor_intt_scalartype)
@@ -11084,9 +11448,11 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_mkldnn_max_pool3d_backward_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)
LOAD_SYMBOL(_lantern_quantized_max_pool1d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)
LOAD_SYMBOL(_lantern_quantized_max_pool2d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)
+ LOAD_SYMBOL(_lantern_quantized_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)
LOAD_SYMBOL(_lantern_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool)
LOAD_SYMBOL(_lantern_mean_tensor_scalartype)
LOAD_SYMBOL(_lantern_Tensor_mean_tensor_scalartype)
+ LOAD_SYMBOL(_lantern_mean_out_tensor_tensor_scalartype)
LOAD_SYMBOL(_lantern_mean_tensor_intarrayref_bool_scalartype)
LOAD_SYMBOL(_lantern_Tensor_mean_tensor_intarrayref_bool_scalartype)
LOAD_SYMBOL(_lantern_mean_out_tensor_tensor_intarrayref_bool_scalartype)
@@ -11138,6 +11504,11 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_mm_tensor_tensor)
LOAD_SYMBOL(_lantern_Tensor_mm_tensor_tensor)
LOAD_SYMBOL(_lantern_mm_out_tensor_tensor_tensor)
+ LOAD_SYMBOL(_lantern__int_mm_tensor_tensor)
+ LOAD_SYMBOL(_lantern__int_mm_out_tensor_tensor_tensor)
+ LOAD_SYMBOL(_lantern__convert_weight_to_int4pack_tensor_intt)
+ LOAD_SYMBOL(_lantern__weight_int4pack_mm_tensor_tensor_intt_tensor)
+ LOAD_SYMBOL(_lantern__weight_int8pack_mm_tensor_tensor_tensor)
LOAD_SYMBOL(_lantern__sparse_mm_tensor_tensor)
LOAD_SYMBOL(_lantern__sparse_mm_tensor_tensor_cstringview)
LOAD_SYMBOL(_lantern__sparse_sparse_matmul_tensor_tensor)
@@ -11178,6 +11549,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_native_batch_norm_tensor_tensor_tensor_tensor_tensor_bool_double_double)
LOAD_SYMBOL(_lantern_native_batch_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double)
LOAD_SYMBOL(_lantern__native_batch_norm_legit_tensor_tensor_tensor_tensor_tensor_bool_double_double)
+ LOAD_SYMBOL(_lantern__native_batch_norm_legit_no_training_tensor_tensor_tensor_tensor_tensor_double_double)
LOAD_SYMBOL(_lantern__native_batch_norm_legit_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double)
LOAD_SYMBOL(_lantern__native_batch_norm_legit_tensor_tensor_tensor_bool_double_double)
LOAD_SYMBOL(_lantern__native_batch_norm_legit_out_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double)
@@ -11398,6 +11770,8 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_slice_tensor_intt_intt_intt_intt)
LOAD_SYMBOL(_lantern_Tensor_slice_tensor_intt_intt_intt_intt)
LOAD_SYMBOL(_lantern_slice_backward_tensor_intarrayref_intt_intt_intt_intt)
+ LOAD_SYMBOL(_lantern_slice_inverse_tensor_tensor_intt_intt_intt_intt)
+ LOAD_SYMBOL(_lantern_Tensor_slice_inverse_tensor_tensor_intt_intt_intt_intt)
LOAD_SYMBOL(_lantern_slice_scatter_tensor_tensor_intt_intt_intt_intt)
LOAD_SYMBOL(_lantern_Tensor_slice_scatter_tensor_tensor_intt_intt_intt_intt)
LOAD_SYMBOL(_lantern_select_scatter_tensor_tensor_intt_intt)
@@ -11454,6 +11828,8 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_sspaddmm_tensor_tensor_tensor_scalar_scalar)
LOAD_SYMBOL(_lantern_Tensor_sspaddmm_tensor_tensor_tensor_scalar_scalar)
LOAD_SYMBOL(_lantern_sspaddmm_out_tensor_tensor_tensor_tensor_scalar_scalar)
+ LOAD_SYMBOL(_lantern__chunk_cat_tensorlist_intt_intt)
+ LOAD_SYMBOL(_lantern__chunk_cat_out_tensor_tensorlist_intt_intt)
LOAD_SYMBOL(_lantern_stack_tensorlist_intt)
LOAD_SYMBOL(_lantern_stack_out_tensor_tensorlist_intt)
LOAD_SYMBOL(_lantern__stack_tensorlist_intt)
@@ -11501,21 +11877,21 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_Tensor_std_tensor_bool)
LOAD_SYMBOL(_lantern_std_tensor_intarrayref_bool_bool)
LOAD_SYMBOL(_lantern_Tensor_std_tensor_intarrayref_bool_bool)
- LOAD_SYMBOL(_lantern_std_tensor_intarrayref_intt_bool)
- LOAD_SYMBOL(_lantern_Tensor_std_tensor_intarrayref_intt_bool)
+ LOAD_SYMBOL(_lantern_std_tensor_intarrayref_scalar_bool)
+ LOAD_SYMBOL(_lantern_Tensor_std_tensor_intarrayref_scalar_bool)
LOAD_SYMBOL(_lantern_std_mean_tensor_bool)
LOAD_SYMBOL(_lantern_std_mean_tensor_intarrayref_bool_bool)
- LOAD_SYMBOL(_lantern_std_mean_tensor_intarrayref_intt_bool)
+ LOAD_SYMBOL(_lantern_std_mean_tensor_intarrayref_scalar_bool)
LOAD_SYMBOL(_lantern_std_mean_tensor_dimnamelist_bool_bool)
- LOAD_SYMBOL(_lantern_std_mean_tensor_dimnamelist_intt_bool)
+ LOAD_SYMBOL(_lantern_std_mean_tensor_dimnamelist_scalar_bool)
LOAD_SYMBOL(_lantern_std_out_tensor_tensor_intarrayref_bool_bool)
- LOAD_SYMBOL(_lantern_std_out_tensor_tensor_intarrayref_intt_bool)
+ LOAD_SYMBOL(_lantern_std_out_tensor_tensor_intarrayref_scalar_bool)
LOAD_SYMBOL(_lantern_std_tensor_dimnamelist_bool_bool)
LOAD_SYMBOL(_lantern_Tensor_std_tensor_dimnamelist_bool_bool)
LOAD_SYMBOL(_lantern_std_out_tensor_tensor_dimnamelist_bool_bool)
- LOAD_SYMBOL(_lantern_std_tensor_dimnamelist_intt_bool)
- LOAD_SYMBOL(_lantern_Tensor_std_tensor_dimnamelist_intt_bool)
- LOAD_SYMBOL(_lantern_std_out_tensor_tensor_dimnamelist_intt_bool)
+ LOAD_SYMBOL(_lantern_std_tensor_dimnamelist_scalar_bool)
+ LOAD_SYMBOL(_lantern_Tensor_std_tensor_dimnamelist_scalar_bool)
+ LOAD_SYMBOL(_lantern_std_out_tensor_tensor_dimnamelist_scalar_bool)
LOAD_SYMBOL(_lantern_prod_tensor_scalartype)
LOAD_SYMBOL(_lantern_Tensor_prod_tensor_scalartype)
LOAD_SYMBOL(_lantern_prod_tensor_intt_bool_scalartype)
@@ -11574,10 +11950,21 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern__nested_from_padded_tensor_tensor_bool)
LOAD_SYMBOL(_lantern_Tensor__nested_tensor_size_tensor)
LOAD_SYMBOL(_lantern_Tensor__nested_tensor_strides_tensor)
- LOAD_SYMBOL(_lantern_Tensor__nested_tensor_offsets_tensor)
+ LOAD_SYMBOL(_lantern_Tensor__nested_tensor_storage_offsets_tensor)
LOAD_SYMBOL(_lantern__nested_from_padded_and_nested_example_tensor_tensor)
- LOAD_SYMBOL(_lantern__nested_view_from_buffer_tensor_tensor_tensor_intarrayref)
- LOAD_SYMBOL(_lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_intarrayref)
+ LOAD_SYMBOL(_lantern__nested_view_from_buffer_tensor_tensor_tensor_tensor)
+ LOAD_SYMBOL(_lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_tensor)
+ LOAD_SYMBOL(_lantern__nested_view_from_jagged_tensor_tensor_tensor_tensor_intt_tensor_tensor)
+ LOAD_SYMBOL(_lantern__nested_view_from_jagged_copy_tensor_tensor_tensor_tensor_intt_tensor_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_values_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_values_copy_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_offsets_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_lengths_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_ragged_idx_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_min_seqlen_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_max_seqlen_tensor)
+ LOAD_SYMBOL(_lantern__nested_get_jagged_dummy_tensor)
+ LOAD_SYMBOL(_lantern__nested_compute_contiguous_strides_offsets_tensor)
LOAD_SYMBOL(_lantern__trilinear_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt)
LOAD_SYMBOL(_lantern_triplet_margin_loss_tensor_tensor_tensor_double_double_double_bool_intt)
LOAD_SYMBOL(_lantern_trunc_tensor)
@@ -11606,21 +11993,21 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor);
LOAD_SYMBOL(_lantern_Tensor_var_tensor_bool)
LOAD_SYMBOL(_lantern_var_tensor_intarrayref_bool_bool)
LOAD_SYMBOL(_lantern_Tensor_var_tensor_intarrayref_bool_bool)
- LOAD_SYMBOL(_lantern_var_tensor_intarrayref_intt_bool)
- LOAD_SYMBOL(_lantern_Tensor_var_tensor_intarrayref_intt_bool)
+ LOAD_SYMBOL(_lantern_var_tensor_intarrayref_scalar_bool)
+ LOAD_SYMBOL(_lantern_Tensor_var_tensor_intarrayref_scalar_bool)
LOAD_SYMBOL(_lantern_var_out_tensor_tensor_intarrayref_bool_bool)
- LOAD_SYMBOL(_lantern_var_out_tensor_tensor_intarrayref_intt_bool)
+
LOAD_SYMBOL(_lantern_var_out_tensor_tensor_intarrayref_scalar_bool) LOAD_SYMBOL(_lantern_var_tensor_dimnamelist_bool_bool) LOAD_SYMBOL(_lantern_Tensor_var_tensor_dimnamelist_bool_bool) LOAD_SYMBOL(_lantern_var_out_tensor_tensor_dimnamelist_bool_bool) - LOAD_SYMBOL(_lantern_var_tensor_dimnamelist_intt_bool) - LOAD_SYMBOL(_lantern_Tensor_var_tensor_dimnamelist_intt_bool) - LOAD_SYMBOL(_lantern_var_out_tensor_tensor_dimnamelist_intt_bool) + LOAD_SYMBOL(_lantern_var_tensor_dimnamelist_scalar_bool) + LOAD_SYMBOL(_lantern_Tensor_var_tensor_dimnamelist_scalar_bool) + LOAD_SYMBOL(_lantern_var_out_tensor_tensor_dimnamelist_scalar_bool) LOAD_SYMBOL(_lantern_var_mean_tensor_bool) LOAD_SYMBOL(_lantern_var_mean_tensor_intarrayref_bool_bool) - LOAD_SYMBOL(_lantern_var_mean_tensor_intarrayref_intt_bool) + LOAD_SYMBOL(_lantern_var_mean_tensor_intarrayref_scalar_bool) LOAD_SYMBOL(_lantern_var_mean_tensor_dimnamelist_bool_bool) - LOAD_SYMBOL(_lantern_var_mean_tensor_dimnamelist_intt_bool) + LOAD_SYMBOL(_lantern_var_mean_tensor_dimnamelist_scalar_bool) LOAD_SYMBOL(_lantern_Tensor_view_as_tensor_tensor) LOAD_SYMBOL(_lantern_where_tensor_tensor_tensor) LOAD_SYMBOL(_lantern_Tensor_where_tensor_tensor_tensor) @@ -11648,6 +12035,10 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_binomial_tensor_tensor_generator) LOAD_SYMBOL(_lantern_native_norm_tensor_scalar) LOAD_SYMBOL(_lantern_native_norm_tensor_scalar_intarrayref_bool_scalartype) + LOAD_SYMBOL(_lantern__batch_norm_with_update_tensor_tensor_tensor_tensor_tensor_double_double) + LOAD_SYMBOL(_lantern__batch_norm_with_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double) + LOAD_SYMBOL(_lantern__batch_norm_no_update_tensor_tensor_tensor_tensor_tensor_double_double) + LOAD_SYMBOL(_lantern_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool_tensor) LOAD_SYMBOL(_lantern__sparse_sum_tensor) LOAD_SYMBOL(_lantern__sparse_sum_tensor_scalartype) LOAD_SYMBOL(_lantern__sparse_sum_tensor_intarrayref) @@ -11731,6 +12122,9 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__addmm_activation_out_tensor_tensor_tensor_tensor_scalar_scalar_bool) LOAD_SYMBOL(_lantern__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool) LOAD_SYMBOL(_lantern_Tensor__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool) + LOAD_SYMBOL(_lantern__scaled_mm_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool) + LOAD_SYMBOL(_lantern__scaled_mm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool) + LOAD_SYMBOL(_lantern__sparse_compressed_tensor_with_dims_intt_intt_intarrayref_intarrayref_scalartype_tensoroptions) LOAD_SYMBOL(_lantern_sparse_compressed_tensor_tensor_tensor_tensor_intarrayref_tensoroptions) LOAD_SYMBOL(_lantern_sparse_csr_tensor_tensor_tensor_tensor_intarrayref_tensoroptions) LOAD_SYMBOL(_lantern_sparse_csc_tensor_tensor_tensor_tensor_intarrayref_tensoroptions) @@ -11747,24 +12141,25 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__sparse_bsr_tensor_unsafe_tensor_tensor_tensor_intarrayref_tensoroptions) LOAD_SYMBOL(_lantern__sparse_bsc_tensor_unsafe_tensor_tensor_tensor_intarrayref_tensoroptions) LOAD_SYMBOL(_lantern_sparse_coo_tensor_intarrayref_tensoroptions) - LOAD_SYMBOL(_lantern_sparse_coo_tensor_tensor_tensor_tensoroptions) - LOAD_SYMBOL(_lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions) - LOAD_SYMBOL(_lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions) - 
LOAD_SYMBOL(_lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref) + LOAD_SYMBOL(_lantern_sparse_coo_tensor_tensor_tensor_tensoroptions_bool) + LOAD_SYMBOL(_lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions_bool) + LOAD_SYMBOL(_lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions_bool) + LOAD_SYMBOL(_lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref_bool) LOAD_SYMBOL(_lantern__validate_sparse_compressed_tensor_args_tensor_tensor_tensor_intarrayref_layout) LOAD_SYMBOL(_lantern__validate_sparse_csr_tensor_args_tensor_tensor_tensor_intarrayref) LOAD_SYMBOL(_lantern__validate_sparse_csc_tensor_args_tensor_tensor_tensor_intarrayref) LOAD_SYMBOL(_lantern__validate_sparse_bsr_tensor_args_tensor_tensor_tensor_intarrayref) LOAD_SYMBOL(_lantern__validate_sparse_bsc_tensor_args_tensor_tensor_tensor_intarrayref) LOAD_SYMBOL(_lantern__sparse_coo_tensor_with_dims_intt_intt_intarrayref_tensoroptions) - LOAD_SYMBOL(_lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions) + LOAD_SYMBOL(_lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions_bool) LOAD_SYMBOL(_lantern_Tensor_sparse_resize__tensor_intarrayref_intt_intt) LOAD_SYMBOL(_lantern_Tensor_sparse_resize_and_clear__tensor_intarrayref_intt_intt) LOAD_SYMBOL(_lantern_Tensor_sparse_mask_tensor_tensor) + LOAD_SYMBOL(_lantern_Tensor__sparse_mask_projection_tensor_tensor_bool) LOAD_SYMBOL(_lantern__to_cpu_tensorlist) - LOAD_SYMBOL(_lantern_Tensor_to_dense_tensor_scalartype) - LOAD_SYMBOL(_lantern_Tensor__to_dense_tensor_scalartype) - LOAD_SYMBOL(_lantern_to_dense_backward_tensor_tensor) + LOAD_SYMBOL(_lantern_Tensor_to_dense_tensor_scalartype_bool) + LOAD_SYMBOL(_lantern_Tensor__to_dense_tensor_scalartype_bool) + LOAD_SYMBOL(_lantern_to_dense_backward_tensor_tensor_bool) LOAD_SYMBOL(_lantern_Tensor_sparse_dim_tensor) LOAD_SYMBOL(_lantern_Tensor__dimi_tensor) LOAD_SYMBOL(_lantern_Tensor_dense_dim_tensor) @@ -11790,14 +12185,21 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_unbind_tensor_dimname) LOAD_SYMBOL(_lantern_Tensor_unbind_tensor_dimname) LOAD_SYMBOL(_lantern_Tensor_to_sparse_tensor_intt) + LOAD_SYMBOL(_lantern_Tensor__to_sparse_tensor_intt) LOAD_SYMBOL(_lantern_Tensor_to_sparse_tensor_layout_intarrayref_intt) + LOAD_SYMBOL(_lantern_Tensor__to_sparse_tensor_layout_intarrayref_intt) LOAD_SYMBOL(_lantern_Tensor_to_sparse_csr_tensor_intt) + LOAD_SYMBOL(_lantern_Tensor__to_sparse_csr_tensor_intt) LOAD_SYMBOL(_lantern_Tensor_to_sparse_csc_tensor_intt) + LOAD_SYMBOL(_lantern_Tensor__to_sparse_csc_tensor_intt) LOAD_SYMBOL(_lantern_Tensor_to_sparse_bsr_tensor_intarrayref_intt) + LOAD_SYMBOL(_lantern_Tensor__to_sparse_bsr_tensor_intarrayref_intt) LOAD_SYMBOL(_lantern_Tensor_to_sparse_bsc_tensor_intarrayref_intt) + LOAD_SYMBOL(_lantern_Tensor__to_sparse_bsc_tensor_intarrayref_intt) + LOAD_SYMBOL(_lantern__to_sparse_semi_structured_tensor) LOAD_SYMBOL(_lantern_Tensor_to_mkldnn_tensor_scalartype) LOAD_SYMBOL(_lantern_mkldnn_reorder_conv2d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref) - LOAD_SYMBOL(_lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt) + LOAD_SYMBOL(_lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref) LOAD_SYMBOL(_lantern_to_mkldnn_backward_tensor_tensor) LOAD_SYMBOL(_lantern_quantize_per_tensor_dynamic_tensor_scalartype_bool) 
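// The sparse hunks above track two upstream signature changes: the
// sparse_coo_tensor family takes a trailing is_coalesced flag (the new _bool
// suffix) and to_dense/_to_dense take a masked_grad flag, while the method
// loaders gain underscore-prefixed _to_sparse* twins. A minimal sketch of the
// COO constructor with the new flag, assuming libtorch >= 2.1:
#include <torch/torch.h>

torch::Tensor coo_demo() {
  // 2x2 sparse matrix with nonzeros at (0,0) and (1,1).
  auto indices = torch::tensor({{0, 1}, {0, 1}}, torch::kLong);
  auto values = torch::tensor({1.0, 2.0});
  // The trailing flag asserts the indices are already sorted and
  // deduplicated, so the kernel may skip a coalesce pass.
  return torch::sparse_coo_tensor(indices, values, {2, 2},
                                  torch::TensorOptions().dtype(torch::kDouble),
                                  /*is_coalesced=*/true);
}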
LOAD_SYMBOL(_lantern_quantize_per_tensor_tensor_double_intt_scalartype) @@ -11904,6 +12306,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_Tensor_masked_scatter__tensor_tensor_tensor) LOAD_SYMBOL(_lantern_masked_scatter_tensor_tensor_tensor) LOAD_SYMBOL(_lantern_Tensor_masked_scatter_tensor_tensor_tensor) + LOAD_SYMBOL(_lantern_masked_scatter_backward_tensor_tensor_intarrayref) LOAD_SYMBOL(_lantern__masked_softmax_tensor_tensor_intt_intt) LOAD_SYMBOL(_lantern__masked_softmax_backward_tensor_tensor_tensor_intt) LOAD_SYMBOL(_lantern_Tensor_view_tensor_intarrayref) @@ -12180,6 +12583,9 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_nonzero_out_tensor_tensor) LOAD_SYMBOL(_lantern_nonzero_tensor) LOAD_SYMBOL(_lantern_Tensor_nonzero_tensor) + LOAD_SYMBOL(_lantern_nonzero_static_out_tensor_tensor_intt_intt) + LOAD_SYMBOL(_lantern_nonzero_static_tensor_intt_intt) + LOAD_SYMBOL(_lantern_Tensor_nonzero_static_tensor_intt_intt) LOAD_SYMBOL(_lantern_nonzero_numpy_tensor) LOAD_SYMBOL(_lantern_Tensor_nonzero_numpy_tensor) LOAD_SYMBOL(_lantern_argwhere_tensor) @@ -12339,6 +12745,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_remainder_scalar_tensor) LOAD_SYMBOL(_lantern_min_tensor) LOAD_SYMBOL(_lantern_Tensor_min_tensor) + LOAD_SYMBOL(_lantern_min_out_tensor_tensor) LOAD_SYMBOL(_lantern_fmin_tensor_tensor) LOAD_SYMBOL(_lantern_Tensor_fmin_tensor_tensor) LOAD_SYMBOL(_lantern_fmin_out_tensor_tensor_tensor) @@ -12391,6 +12798,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_Tensor_argsort_tensor_intt_bool) LOAD_SYMBOL(_lantern_argsort_tensor_bool_intt_bool) LOAD_SYMBOL(_lantern_Tensor_argsort_tensor_bool_intt_bool) + LOAD_SYMBOL(_lantern_argsort_out_tensor_tensor_bool_intt_bool) LOAD_SYMBOL(_lantern_argsort_tensor_dimname_bool) LOAD_SYMBOL(_lantern_Tensor_argsort_tensor_dimname_bool) LOAD_SYMBOL(_lantern_topk_out_tensor_tensor_tensor_intt_intt_bool_bool) @@ -12442,57 +12850,70 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__amp_update_scale__tensor_tensor_tensor_double_double_intt) LOAD_SYMBOL(_lantern__foreach_add_tensorlist_scalar) LOAD_SYMBOL(_lantern__foreach_add__tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_sub_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_sub__tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_mul_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_mul__tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_div_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_div__tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_clamp_min_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_clamp_min__tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_clamp_max_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_clamp_max__tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_maximum_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_maximum__tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_minimum_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_minimum__tensorlist_scalar) LOAD_SYMBOL(_lantern__foreach_add_tensorlist_tensorlist_scalar) LOAD_SYMBOL(_lantern__foreach_add__tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_add_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_add__tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_add_tensorlist_tensor_scalar) + LOAD_SYMBOL(_lantern__foreach_add__tensorlist_tensor_scalar) + LOAD_SYMBOL(_lantern__foreach_sub_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_sub__tensorlist_scalar) 
LOAD_SYMBOL(_lantern__foreach_sub_tensorlist_tensorlist_scalar) LOAD_SYMBOL(_lantern__foreach_sub__tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_sub_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_sub__tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_mul_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_mul__tensorlist_scalar) LOAD_SYMBOL(_lantern__foreach_mul_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_mul__tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_mul_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_mul__tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_mul_tensorlist_tensor) + LOAD_SYMBOL(_lantern__foreach_mul__tensorlist_tensor) + LOAD_SYMBOL(_lantern__foreach_div_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_div__tensorlist_scalar) LOAD_SYMBOL(_lantern__foreach_div_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_div__tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_clamp_min_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_clamp_min__tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_clamp_max_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_clamp_max__tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_maximum_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_maximum__tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_minimum_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_minimum__tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_add_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_add__tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_sub_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_sub__tensorlist_arrayrefscalar) LOAD_SYMBOL(_lantern__foreach_div_tensorlist_arrayrefscalar) LOAD_SYMBOL(_lantern__foreach_div__tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_mul_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_mul__tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_clamp_min_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_clamp_min__tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_div_tensorlist_tensor) + LOAD_SYMBOL(_lantern__foreach_div__tensorlist_tensor) + LOAD_SYMBOL(_lantern__foreach_clamp_max_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_clamp_max__tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_clamp_max_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_clamp_max__tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_clamp_max_tensorlist_arrayrefscalar) LOAD_SYMBOL(_lantern__foreach_clamp_max__tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_clamp_min_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_clamp_min__tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_clamp_min_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_clamp_min__tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_clamp_min_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_clamp_min__tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_maximum_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_maximum__tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_maximum_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_maximum__tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_maximum_tensorlist_arrayrefscalar) LOAD_SYMBOL(_lantern__foreach_maximum__tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_minimum_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_minimum__tensorlist_scalar) + 
LOAD_SYMBOL(_lantern__foreach_minimum_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_minimum__tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_minimum_tensorlist_arrayrefscalar) LOAD_SYMBOL(_lantern__foreach_minimum__tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_exp_tensorlist) - LOAD_SYMBOL(_lantern__foreach_zero__tensorlist) - LOAD_SYMBOL(_lantern__foreach_exp__tensorlist) - LOAD_SYMBOL(_lantern__foreach_sqrt_tensorlist) - LOAD_SYMBOL(_lantern__foreach_sqrt__tensorlist) + LOAD_SYMBOL(_lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor) + LOAD_SYMBOL(_lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor) + LOAD_SYMBOL(_lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor) + LOAD_SYMBOL(_lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor) LOAD_SYMBOL(_lantern__foreach_abs_tensorlist) LOAD_SYMBOL(_lantern__foreach_abs__tensorlist) LOAD_SYMBOL(_lantern__foreach_acos_tensorlist) @@ -12511,10 +12932,20 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__foreach_erf__tensorlist) LOAD_SYMBOL(_lantern__foreach_erfc_tensorlist) LOAD_SYMBOL(_lantern__foreach_erfc__tensorlist) + LOAD_SYMBOL(_lantern__foreach_exp_tensorlist) + LOAD_SYMBOL(_lantern__foreach_exp__tensorlist) LOAD_SYMBOL(_lantern__foreach_expm1_tensorlist) LOAD_SYMBOL(_lantern__foreach_expm1__tensorlist) LOAD_SYMBOL(_lantern__foreach_floor_tensorlist) LOAD_SYMBOL(_lantern__foreach_floor__tensorlist) + LOAD_SYMBOL(_lantern__foreach_frac_tensorlist) + LOAD_SYMBOL(_lantern__foreach_frac__tensorlist) + LOAD_SYMBOL(_lantern__foreach_lerp_tensorlist_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_lerp__tensorlist_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_lerp_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_lerp__tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_lgamma_tensorlist) + LOAD_SYMBOL(_lantern__foreach_lgamma__tensorlist) LOAD_SYMBOL(_lantern__foreach_log_tensorlist) LOAD_SYMBOL(_lantern__foreach_log__tensorlist) LOAD_SYMBOL(_lantern__foreach_log10_tensorlist) @@ -12523,51 +12954,47 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__foreach_log1p__tensorlist) LOAD_SYMBOL(_lantern__foreach_log2_tensorlist) LOAD_SYMBOL(_lantern__foreach_log2__tensorlist) + LOAD_SYMBOL(_lantern__foreach_max_tensorlist) LOAD_SYMBOL(_lantern__foreach_neg_tensorlist) LOAD_SYMBOL(_lantern__foreach_neg__tensorlist) - LOAD_SYMBOL(_lantern__foreach_tan_tensorlist) - LOAD_SYMBOL(_lantern__foreach_tan__tensorlist) - LOAD_SYMBOL(_lantern__foreach_tanh_tensorlist) - LOAD_SYMBOL(_lantern__foreach_tanh__tensorlist) - LOAD_SYMBOL(_lantern__foreach_sin_tensorlist) - LOAD_SYMBOL(_lantern__foreach_sin__tensorlist) - LOAD_SYMBOL(_lantern__foreach_sinh_tensorlist) - LOAD_SYMBOL(_lantern__foreach_sinh__tensorlist) - 
LOAD_SYMBOL(_lantern__foreach_round_tensorlist) - LOAD_SYMBOL(_lantern__foreach_round__tensorlist) - LOAD_SYMBOL(_lantern__foreach_lgamma_tensorlist) - LOAD_SYMBOL(_lantern__foreach_lgamma__tensorlist) - LOAD_SYMBOL(_lantern__foreach_frac_tensorlist) - LOAD_SYMBOL(_lantern__foreach_frac__tensorlist) + LOAD_SYMBOL(_lantern__foreach_norm_tensorlist_scalar_scalartype) + LOAD_SYMBOL(_lantern__foreach_pow_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_pow_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_pow_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_pow_scalar_tensorlist) + LOAD_SYMBOL(_lantern__foreach_pow__tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_pow__tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_pow__tensorlist_arrayrefscalar) LOAD_SYMBOL(_lantern__foreach_reciprocal_tensorlist) LOAD_SYMBOL(_lantern__foreach_reciprocal__tensorlist) + LOAD_SYMBOL(_lantern__foreach_round_tensorlist) + LOAD_SYMBOL(_lantern__foreach_round__tensorlist) LOAD_SYMBOL(_lantern__foreach_sigmoid_tensorlist) LOAD_SYMBOL(_lantern__foreach_sigmoid__tensorlist) + LOAD_SYMBOL(_lantern__foreach_sign_tensorlist) + LOAD_SYMBOL(_lantern__foreach_sign__tensorlist) + LOAD_SYMBOL(_lantern__foreach_sin_tensorlist) + LOAD_SYMBOL(_lantern__foreach_sin__tensorlist) + LOAD_SYMBOL(_lantern__foreach_sinh_tensorlist) + LOAD_SYMBOL(_lantern__foreach_sinh__tensorlist) + LOAD_SYMBOL(_lantern__foreach_sqrt_tensorlist) + LOAD_SYMBOL(_lantern__foreach_sqrt__tensorlist) + LOAD_SYMBOL(_lantern__foreach_tan_tensorlist) + LOAD_SYMBOL(_lantern__foreach_tan__tensorlist) + LOAD_SYMBOL(_lantern__foreach_tanh_tensorlist) + LOAD_SYMBOL(_lantern__foreach_tanh__tensorlist) LOAD_SYMBOL(_lantern__foreach_trunc_tensorlist) LOAD_SYMBOL(_lantern__foreach_trunc__tensorlist) - LOAD_SYMBOL(_lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor) - LOAD_SYMBOL(_lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor) - LOAD_SYMBOL(_lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor) - LOAD_SYMBOL(_lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor) - LOAD_SYMBOL(_lantern__foreach_norm_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_lerp_tensorlist_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_lerp__tensorlist_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_lerp_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_lerp__tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_zero__tensorlist) + LOAD_SYMBOL(_lantern__foreach_copy__tensorlist_tensorlist_bool) + LOAD_SYMBOL(_lantern__foreach_copy_tensorlist_tensorlist_bool) LOAD_SYMBOL(_lantern_bucketize_tensor_tensor_bool_bool) LOAD_SYMBOL(_lantern_bucketize_out_tensor_tensor_tensor_bool_bool) LOAD_SYMBOL(_lantern_bucketize_scalar_tensor_bool_bool) 
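// The _foreach_* block above is reordered alphabetically and extended: the
// binary ops gain Tensor and Scalar-list right-hand sides, and new fused ops
// appear (_foreach_max, _foreach_sign, _foreach_pow, _foreach_copy_, ...).
// A minimal sketch of the fused list operations, assuming libtorch >= 2.2;
// the Tensor right-hand side is assumed to be 0-dim here.
#include <torch/torch.h>

void foreach_demo() {
  std::vector<torch::Tensor> params = {torch::ones({2}), torch::ones({3})};
  // One fused launch over the whole list, in place (suffix tensorlist_scalar).
  at::_foreach_add_(params, /*scalar=*/1.0);
  // New overload: a single 0-dim tensor added to every element, scaled by
  // alpha (suffix tensorlist_tensor_scalar).
  at::_foreach_add_(params, torch::tensor(2.0), /*alpha=*/0.5);
  // One scalar per list element (suffix tensorlist_arrayrefscalar).
  std::vector<at::Scalar> scalars = {2.0, 3.0};
  at::_foreach_mul_(params, scalars);
}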
LOAD_SYMBOL(_lantern_searchsorted_tensor_tensor_bool_bool_cstringview_tensor) LOAD_SYMBOL(_lantern_searchsorted_out_tensor_tensor_tensor_bool_bool_cstringview_tensor) LOAD_SYMBOL(_lantern_searchsorted_tensor_scalar_bool_bool_cstringview_tensor) + LOAD_SYMBOL(_lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor) LOAD_SYMBOL(_lantern__convert_indices_from_coo_to_csr_tensor_intt_bool) LOAD_SYMBOL(_lantern__convert_indices_from_coo_to_csr_out_tensor_tensor_intt_bool) LOAD_SYMBOL(_lantern__convert_indices_from_csr_to_coo_tensor_tensor_bool_bool) @@ -12998,6 +13425,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_Tensor_logdet_tensor) LOAD_SYMBOL(_lantern_linalg_eig_tensor) LOAD_SYMBOL(_lantern_linalg_eig_out_tensor_tensor_tensor) + LOAD_SYMBOL(_lantern__linalg_eigvals_tensor) LOAD_SYMBOL(_lantern_linalg_eigvals_tensor) LOAD_SYMBOL(_lantern_linalg_eigvals_out_tensor_tensor) LOAD_SYMBOL(_lantern__linalg_eigh_tensor_cstringview_bool) @@ -13057,6 +13485,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_linalg_solve_ex_tensor_tensor_bool_bool) LOAD_SYMBOL(_lantern_linalg_solve_ex_out_tensor_tensor_tensor_tensor_bool_bool) LOAD_SYMBOL(_lantern_linalg_solve_tensor_tensor_bool) + LOAD_SYMBOL(_lantern__spsolve_tensor_tensor_bool) LOAD_SYMBOL(_lantern_linalg_solve_out_tensor_tensor_tensor_bool) LOAD_SYMBOL(_lantern_linalg_tensorinv_tensor_intt) LOAD_SYMBOL(_lantern_linalg_tensorinv_out_tensor_tensor_intt) @@ -13078,6 +13507,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_linalg_multi_dot_out_tensor_tensorlist) LOAD_SYMBOL(_lantern_nested_to_padded_tensor_tensor_double_intarrayref) LOAD_SYMBOL(_lantern__test_serialization_subcmul_tensor_tensor_scalar) + LOAD_SYMBOL(_lantern__test_parallel_materialize_tensor_intt_bool) LOAD_SYMBOL(_lantern__test_optional_intlist_tensor_intarrayref) LOAD_SYMBOL(_lantern__test_optional_filled_intlist_tensor_intarrayref) LOAD_SYMBOL(_lantern__test_optional_floatlist_tensor_arrayrefdouble) @@ -13091,7 +13521,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__test_autograd_multiple_dispatch_view_copy_tensor) LOAD_SYMBOL(_lantern_segment_reduce_tensor_cstringview_tensor_tensor_tensor_intt_bool_scalar) LOAD_SYMBOL(_lantern__segment_reduce_backward_tensor_tensor_tensor_cstringview_tensor_tensor_intt_scalar) - LOAD_SYMBOL(_lantern_pad_sequence_tensorlist_bool_double) + LOAD_SYMBOL(_lantern_pad_sequence_tensorlist_bool_double_cstringview) LOAD_SYMBOL(_lantern_flatten_dense_tensors_tensorlist) LOAD_SYMBOL(_lantern_unflatten_dense_tensors_tensor_tensorlist) LOAD_SYMBOL(_lantern__nested_tensor_from_tensor_list_tensorlist_scalartype_layout_device_bool) @@ -13135,28 +13565,35 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_unfold_copy_tensor_intt_intt_intt) LOAD_SYMBOL(_lantern_alias_copy_tensor) LOAD_SYMBOL(_lantern_Tensor_to_padded_tensor_tensor_double_intarrayref) + LOAD_SYMBOL(_lantern__jagged_to_padded_dense_forward_tensor_tensorlist_intarrayref_double) + LOAD_SYMBOL(_lantern__padded_dense_to_jagged_forward_tensor_tensorlist_intt) LOAD_SYMBOL(_lantern__nested_tensor_softmax_with_shape_tensor_tensor) + LOAD_SYMBOL(_lantern__safe_softmax_tensor_intt_scalartype) LOAD_SYMBOL(_lantern__transformer_encoder_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt) LOAD_SYMBOL(_lantern__native_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_bool_bool_intt) 
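// The hunk that follows retires the pre-2.x scaled_dot_product_attention
// loaders and registers the current signatures, which append an optional
// scale (double) and, on the public entry point, an enable_gqa flag; that is
// what the double_bool_double_bool tail encodes. A minimal sketch of the
// call, assuming libtorch >= 2.5; the shapes are illustrative.
#include <torch/torch.h>

torch::Tensor sdpa_demo() {
  auto q = torch::randn({1, 8, 16, 64});  // (batch, heads, seq_len, head_dim)
  auto k = torch::randn({1, 8, 16, 64});
  auto v = torch::randn({1, 8, 16, 64});
  // dropout_p, is_causal, scale, enable_gqa mirror the new suffix order.
  return at::scaled_dot_product_attention(
      q, k, v, /*attn_mask=*/{}, /*dropout_p=*/0.0, /*is_causal=*/true,
      /*scale=*/0.125, /*enable_gqa=*/false);
}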
- LOAD_SYMBOL(_lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool) - LOAD_SYMBOL(_lantern__scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_bool) - LOAD_SYMBOL(_lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool) - LOAD_SYMBOL(_lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor) - LOAD_SYMBOL(_lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool) - LOAD_SYMBOL(_lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt) - LOAD_SYMBOL(_lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_bool_bool) - LOAD_SYMBOL(_lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool) - LOAD_SYMBOL(_lantern__chunk_grad_outputs_efficient_attention_tensor_tensor_tensor_bool) - LOAD_SYMBOL(_lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool) - LOAD_SYMBOL(_lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt) - LOAD_SYMBOL(_lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_bool_bool) - LOAD_SYMBOL(_lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool) + LOAD_SYMBOL(_lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_double_bool) + LOAD_SYMBOL(_lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool_double_bool) + LOAD_SYMBOL(_lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor_double_bool) + LOAD_SYMBOL(_lantern__scaled_dot_product_attention_math_for_mps_tensor_tensor_tensor_tensor_double_bool_tensor_double) + LOAD_SYMBOL(_lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool_double) + LOAD_SYMBOL(_lantern__scaled_dot_product_flash_attention_for_cpu_tensor_tensor_tensor_double_bool_tensor_double) + LOAD_SYMBOL(_lantern__scaled_dot_product_fused_attention_overrideable_tensor_tensor_tensor_tensor_double_bool_bool_double) + LOAD_SYMBOL(_lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double) + LOAD_SYMBOL(_lantern__scaled_dot_product_flash_attention_for_cpu_backward_tensor_tensor_tensor_tensor_tensor_tensor_double_bool_tensor_double) + LOAD_SYMBOL(_lantern__scaled_dot_product_fused_attention_overrideable_backward_tensor_tensor_tensor_tensor_tensor_stdarraybool_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double) + LOAD_SYMBOL(_lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_tensor_bool_double_bool_double) + LOAD_SYMBOL(_lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_stdarraybool_bool_double) + LOAD_SYMBOL(_lantern__scaled_dot_product_cudnn_attention_tensor_tensor_tensor_tensor_bool_double_bool_bool_double) + LOAD_SYMBOL(_lantern__scaled_dot_product_cudnn_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_double) + LOAD_SYMBOL(_lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool_double_intt_intt_tensor_tensor) + 
LOAD_SYMBOL(_lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double_intt_intt) + LOAD_SYMBOL(_lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_intt_bool_double_tensor_intt) + LOAD_SYMBOL(_lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_double_tensor_tensor_intt_bool_double_intt_intt_bool) LOAD_SYMBOL(_lantern__triton_scaled_dot_attention_tensor_tensor_tensor_double) + LOAD_SYMBOL(_lantern__fill_mem_eff_dropout_mask__tensor_double_intt_intt) LOAD_SYMBOL(_lantern__triton_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor) LOAD_SYMBOL(_lantern_special_airy_ai_tensor) LOAD_SYMBOL(_lantern_special_airy_ai_out_tensor_tensor) - LOAD_SYMBOL(_lantern__transformer_decoder_only_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor) - LOAD_SYMBOL(_lantern__native_decoder_only_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool) LOAD_SYMBOL(_lantern_special_bessel_j0_tensor) LOAD_SYMBOL(_lantern_special_bessel_j0_out_tensor_tensor) LOAD_SYMBOL(_lantern_special_bessel_j1_tensor) @@ -13253,7 +13690,13 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_special_spherical_bessel_j0_out_tensor_tensor) LOAD_SYMBOL(_lantern__foobar_tensor_bool_bool_bool) LOAD_SYMBOL(_lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor) LOAD_SYMBOL(_lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adagrad__tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__propagate_xla_data_tensor_tensor) LOAD_SYMBOL(_lantern__new_zeros_with_same_feature_meta_out_tensor_tensor_tensor_intt) LOAD_SYMBOL(_lantern__cudnn_ctc_loss_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intt_bool_bool) LOAD_SYMBOL(_lantern__cudnn_rnn_flatten_weight_out_tensor_tensorlist_intt_intt_intt_intt_intt_intt_bool_bool) @@ -13268,6 +13711,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__add_relu_out_tensor_tensor_scalar_scalar) LOAD_SYMBOL(_lantern_add_out_tensor_tensor_scalar_scalar) LOAD_SYMBOL(_lantern_affine_grid_generator_out_tensor_tensor_intarrayref_bool) + LOAD_SYMBOL(_lantern__test_functorch_fallback_out_tensor_tensor_tensor) LOAD_SYMBOL(_lantern_bartlett_window_out_tensor_intt) LOAD_SYMBOL(_lantern_bartlett_window_out_tensor_intt_bool) LOAD_SYMBOL(_lantern_quantized_batch_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_double_double_intt) @@ -13295,7 +13739,6 @@ 
LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_cudnn_affine_grid_generator_backward_out_tensor_tensor_intt_intt_intt_intt) LOAD_SYMBOL(_lantern_cudnn_batch_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double) LOAD_SYMBOL(_lantern_cudnn_batch_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_tensor) - LOAD_SYMBOL(_lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool) LOAD_SYMBOL(_lantern_cudnn_convolution_transpose_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool) LOAD_SYMBOL(_lantern__mps_convolution_transpose_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt) LOAD_SYMBOL(_lantern_mps_convolution_transpose_backward_out_tensor_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_stdarraybool) @@ -13319,6 +13762,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__embedding_bag_dense_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_intt_bool_intt_tensor_intt) LOAD_SYMBOL(_lantern__embedding_bag_per_sample_weights_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt) LOAD_SYMBOL(_lantern_empty_out_tensor_intarrayref_dimnamelist_memoryformat) + LOAD_SYMBOL(_lantern_empty_permuted_out_tensor_intarrayref_intarrayref) LOAD_SYMBOL(_lantern_new_empty_out_tensor_tensor_intarrayref) LOAD_SYMBOL(_lantern_new_empty_strided_out_tensor_tensor_intarrayref_intarrayref) LOAD_SYMBOL(_lantern_new_full_out_tensor_tensor_intarrayref_scalar) @@ -13335,6 +13779,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_empty_strided_out_tensor_intarrayref_intarrayref) LOAD_SYMBOL(_lantern_fill_out_tensor_tensor_scalar) LOAD_SYMBOL(_lantern_fill_out_tensor_tensor_tensor) + LOAD_SYMBOL(_lantern_floor_divide_out_tensor_tensor_scalar) LOAD_SYMBOL(_lantern_full_out_tensor_intarrayref_scalar_dimnamelist) LOAD_SYMBOL(_lantern_full_like_out_tensor_tensor_scalar_memoryformat) LOAD_SYMBOL(_lantern_from_file_out_tensor_cstringview_bool_intt) @@ -13354,9 +13799,9 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_kaiser_window_out_tensor_intt_bool_double) LOAD_SYMBOL(_lantern_native_group_norm_out_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_intt_intt_double) LOAD_SYMBOL(_lantern_native_group_norm_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_intt_intt_stdarraybool) - LOAD_SYMBOL(_lantern_index_put_out_tensor_tensor_constclistcoptionaltensor_tensor_bool) - LOAD_SYMBOL(_lantern__index_put_impl_out_tensor_tensor_constclistcoptionaltensor_tensor_bool_bool) - LOAD_SYMBOL(_lantern__index_put_impl_tensor_constclistcoptionaltensor_tensor_bool_bool) + LOAD_SYMBOL(_lantern_index_put_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool) + LOAD_SYMBOL(_lantern__index_put_impl_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool_bool) + LOAD_SYMBOL(_lantern__index_put_impl_tensor_constcliststdoptionaltensor_tensor_bool_bool) LOAD_SYMBOL(_lantern_isnan_out_tensor_tensor) LOAD_SYMBOL(_lantern_native_layer_norm_out_tensor_tensor_tensor_tensor_intarrayref_tensor_tensor_double) LOAD_SYMBOL(_lantern_native_layer_norm_backward_out_tensor_tensor_tensor_tensor_tensor_intarrayref_tensor_tensor_tensor_tensor_stdarraybool) @@ -13375,6 +13820,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); 
LOAD_SYMBOL(_lantern_mkldnn_max_pool3d_backward_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool) LOAD_SYMBOL(_lantern_quantized_max_pool1d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool) LOAD_SYMBOL(_lantern_quantized_max_pool2d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool) + LOAD_SYMBOL(_lantern_quantized_max_pool3d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool) LOAD_SYMBOL(_lantern_median_out_tensor_tensor) LOAD_SYMBOL(_lantern_nanmedian_out_tensor_tensor) LOAD_SYMBOL(_lantern__mps_convolution_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt) @@ -13392,6 +13838,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__sparse_sparse_matmul_out_tensor_tensor_tensor) LOAD_SYMBOL(_lantern_mul_out_tensor_tensor_scalar) LOAD_SYMBOL(_lantern__native_batch_norm_legit_functional_tensor_tensor_tensor_tensor_tensor_bool_double_double) + LOAD_SYMBOL(_lantern__native_batch_norm_legit_no_training_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double) LOAD_SYMBOL(_lantern_batch_norm_stats_out_tensor_tensor_tensor_double) LOAD_SYMBOL(_lantern_batch_norm_gather_stats_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double_intt) LOAD_SYMBOL(_lantern_batch_norm_gather_stats_with_counts_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double_tensor) @@ -13434,7 +13881,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_unsafe_split_out_tensorlist_tensor_intt_intt) LOAD_SYMBOL(_lantern_unsafe_split_with_sizes_out_tensorlist_tensor_intarrayref_intt) LOAD_SYMBOL(_lantern_sum_out_tensor_tensor_scalartype) - LOAD_SYMBOL(_lantern_std_mean_out_tensor_tensor_tensor_intarrayref_intt_bool) + LOAD_SYMBOL(_lantern_std_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool) LOAD_SYMBOL(_lantern_prod_out_tensor_tensor_scalartype) LOAD_SYMBOL(_lantern__mkldnn_transpose_out_tensor_tensor_intt_intt) LOAD_SYMBOL(_lantern_flip_out_tensor_tensor_intarrayref) @@ -13445,8 +13892,11 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__nested_from_padded_out_tensor_tensor_tensor_bool) LOAD_SYMBOL(_lantern__nested_tensor_size_out_tensor_tensor) LOAD_SYMBOL(_lantern__nested_tensor_strides_out_tensor_tensor) + LOAD_SYMBOL(_lantern__nested_tensor_storage_offsets_out_tensor_tensor) LOAD_SYMBOL(_lantern__nested_from_padded_and_nested_example_out_tensor_tensor_tensor) - LOAD_SYMBOL(_lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_intarrayref) + LOAD_SYMBOL(_lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_tensor) + LOAD_SYMBOL(_lantern__nested_view_from_jagged_copy_out_tensor_tensor_tensor_tensor_tensor_intt_tensor_tensor) + LOAD_SYMBOL(_lantern__nested_get_values_copy_out_tensor_tensor) LOAD_SYMBOL(_lantern__trilinear_out_tensor_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt) LOAD_SYMBOL(_lantern__unique_out_tensor_tensor_tensor_bool_bool) LOAD_SYMBOL(_lantern_unique_dim_out_tensor_tensor_tensor_tensor_intt_bool_bool_bool) @@ -13454,7 +13904,7 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_unique_dim_consecutive_out_tensor_tensor_tensor_tensor_intt_bool_bool) LOAD_SYMBOL(_lantern__unique2_out_tensor_tensor_tensor_tensor_bool_bool_bool) LOAD_SYMBOL(_lantern__unsafe_view_out_tensor_tensor_intarrayref) - LOAD_SYMBOL(_lantern_var_mean_out_tensor_tensor_tensor_intarrayref_intt_bool) + 
LOAD_SYMBOL(_lantern_var_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool) LOAD_SYMBOL(_lantern__weight_norm_interface_out_tensor_tensor_tensor_tensor_intt) LOAD_SYMBOL(_lantern__weight_norm_interface_backward_out_tensor_tensor_tensor_tensor_tensor_tensor_intt) LOAD_SYMBOL(_lantern_zeros_out_tensor_intarrayref_dimnamelist) @@ -13468,6 +13918,8 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern_binomial_out_tensor_tensor_tensor_generator) LOAD_SYMBOL(_lantern_native_norm_out_tensor_tensor_scalar) LOAD_SYMBOL(_lantern_native_norm_out_tensor_tensor_scalar_intarrayref_bool_scalartype) + LOAD_SYMBOL(_lantern__batch_norm_with_update_functional_tensor_tensor_tensor_tensor_tensor_double_double) + LOAD_SYMBOL(_lantern__batch_norm_no_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double) LOAD_SYMBOL(_lantern__sparse_sum_out_tensor_tensor_intarrayref) LOAD_SYMBOL(_lantern__sparse_sum_backward_out_tensor_tensor_tensor_intarrayref) LOAD_SYMBOL(_lantern__sparse_csr_sum_out_tensor_tensor_intarrayref_bool_scalartype) @@ -13492,27 +13944,28 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__sparse_addmm_out_tensor_tensor_tensor_tensor_scalar_scalar) LOAD_SYMBOL(_lantern_sparse_coo_tensor_out_tensor_intarrayref) LOAD_SYMBOL(_lantern__sparse_coo_tensor_with_dims_out_tensor_intt_intt_intarrayref) - LOAD_SYMBOL(_lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor) + LOAD_SYMBOL(_lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor_bool) LOAD_SYMBOL(_lantern_sparse_resize_out_tensor_tensor_intarrayref_intt_intt) LOAD_SYMBOL(_lantern_sparse_resize_tensor_intarrayref_intt_intt) LOAD_SYMBOL(_lantern_sparse_resize_and_clear_out_tensor_tensor_intarrayref_intt_intt) LOAD_SYMBOL(_lantern_sparse_resize_and_clear_tensor_intarrayref_intt_intt) LOAD_SYMBOL(_lantern_sparse_mask_out_tensor_tensor_tensor) - LOAD_SYMBOL(_lantern__to_dense_out_tensor_tensor_scalartype) + LOAD_SYMBOL(_lantern__sparse_mask_projection_out_tensor_tensor_tensor_bool) + LOAD_SYMBOL(_lantern__to_dense_out_tensor_tensor_scalartype_bool) LOAD_SYMBOL(_lantern__coalesce_out_tensor_tensor) LOAD_SYMBOL(_lantern__coalesced_out_tensor_tensor_bool) LOAD_SYMBOL(_lantern__coalesced_tensor_bool) LOAD_SYMBOL(_lantern_copy_sparse_to_sparse_out_tensor_tensor_tensor_bool) LOAD_SYMBOL(_lantern_copy_sparse_to_sparse_tensor_tensor_bool) - LOAD_SYMBOL(_lantern_to_sparse_out_tensor_tensor_intt) - LOAD_SYMBOL(_lantern_to_sparse_out_tensor_tensor_layout_intarrayref_intt) - LOAD_SYMBOL(_lantern_to_sparse_csr_out_tensor_tensor_intt) - LOAD_SYMBOL(_lantern_to_sparse_csc_out_tensor_tensor_intt) - LOAD_SYMBOL(_lantern_to_sparse_bsr_out_tensor_tensor_intarrayref_intt) - LOAD_SYMBOL(_lantern_to_sparse_bsc_out_tensor_tensor_intarrayref_intt) + LOAD_SYMBOL(_lantern__to_sparse_out_tensor_tensor_intt) + LOAD_SYMBOL(_lantern__to_sparse_out_tensor_tensor_layout_intarrayref_intt) + LOAD_SYMBOL(_lantern__to_sparse_csr_out_tensor_tensor_intt) + LOAD_SYMBOL(_lantern__to_sparse_csc_out_tensor_tensor_intt) + LOAD_SYMBOL(_lantern__to_sparse_bsr_out_tensor_tensor_intarrayref_intt) + LOAD_SYMBOL(_lantern__to_sparse_bsc_out_tensor_tensor_intarrayref_intt) LOAD_SYMBOL(_lantern_to_mkldnn_out_tensor_tensor_scalartype) LOAD_SYMBOL(_lantern_mkldnn_reorder_conv2d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref) - 
LOAD_SYMBOL(_lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt) + LOAD_SYMBOL(_lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref) LOAD_SYMBOL(_lantern_quantize_per_tensor_dynamic_out_tensor_tensor_scalartype_bool) LOAD_SYMBOL(_lantern_quantize_per_tensor_out_tensor_tensor_double_intt_scalartype) LOAD_SYMBOL(_lantern_quantize_per_tensor_out_tensor_tensor_tensor_tensor_scalartype) @@ -13592,7 +14045,6 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__histogramdd_from_bin_cts_out_tensor_tensor_intarrayref_arrayrefdouble_tensor_bool) LOAD_SYMBOL(_lantern__histogramdd_from_bin_tensors_out_tensor_tensor_tensorlist_tensor_bool) LOAD_SYMBOL(_lantern_remainder_out_tensor_scalar_tensor) - LOAD_SYMBOL(_lantern_argsort_out_tensor_tensor_bool_intt_bool) LOAD_SYMBOL(_lantern_unfold_backward_out_tensor_tensor_intarrayref_intt_intt_intt) LOAD_SYMBOL(_lantern_normal_out_tensor_tensor_double_double_generator) LOAD_SYMBOL(_lantern__amp_foreach_non_finite_check_and_unscale_out_tensorlist_tensorlist_tensor_tensor) @@ -13600,33 +14052,38 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__amp_update_scale_out_tensor_tensor_tensor_tensor_double_double_intt) LOAD_SYMBOL(_lantern__amp_update_scale_tensor_tensor_tensor_double_double_intt) LOAD_SYMBOL(_lantern__foreach_add_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_sub_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_mul_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_div_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_maximum_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_minimum_out_tensorlist_tensorlist_scalar) LOAD_SYMBOL(_lantern__foreach_add_out_tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_add_out_tensorlist_tensorlist_tensor_scalar) + LOAD_SYMBOL(_lantern__foreach_sub_out_tensorlist_tensorlist_scalar) LOAD_SYMBOL(_lantern__foreach_sub_out_tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_mul_out_tensorlist_tensorlist_scalar) LOAD_SYMBOL(_lantern__foreach_mul_out_tensorlist_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_mul_out_tensorlist_tensorlist_tensor) + LOAD_SYMBOL(_lantern__foreach_div_out_tensorlist_tensorlist_scalar) LOAD_SYMBOL(_lantern__foreach_div_out_tensorlist_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar) LOAD_SYMBOL(_lantern__foreach_div_out_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar) + 
LOAD_SYMBOL(_lantern__foreach_div_out_tensorlist_tensorlist_tensor) + LOAD_SYMBOL(_lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_clamp_max_out_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_maximum_out_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_maximum_out_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_minimum_out_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_minimum_out_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_exp_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_zero_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_zero_tensorlist) - LOAD_SYMBOL(_lantern__foreach_sqrt_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor) + LOAD_SYMBOL(_lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar) + LOAD_SYMBOL(_lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor) LOAD_SYMBOL(_lantern__foreach_abs_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_acos_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_asin_out_tensorlist_tensorlist) @@ -13636,34 +14093,37 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__foreach_cosh_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_erf_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_erfc_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_exp_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_expm1_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_floor_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_frac_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_lgamma_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_log_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_log10_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_log1p_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_log2_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_max_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_neg_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_tan_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_tanh_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_sin_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_sinh_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_round_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_lgamma_out_tensorlist_tensorlist) - 
LOAD_SYMBOL(_lantern__foreach_frac_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_norm_out_tensorlist_tensorlist_scalar_scalartype) + LOAD_SYMBOL(_lantern__foreach_pow_out_tensorlist_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_pow_out_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_pow_out_tensorlist_tensorlist_arrayrefscalar) LOAD_SYMBOL(_lantern__foreach_reciprocal_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_round_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_sigmoid_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_sign_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_sin_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_sinh_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_sqrt_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_tan_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_tanh_out_tensorlist_tensorlist) LOAD_SYMBOL(_lantern__foreach_trunc_out_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor) - LOAD_SYMBOL(_lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar) - LOAD_SYMBOL(_lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor) - LOAD_SYMBOL(_lantern__foreach_norm_out_tensorlist_tensorlist_scalar) - LOAD_SYMBOL(_lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist) - LOAD_SYMBOL(_lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar) + LOAD_SYMBOL(_lantern__foreach_zero_out_tensorlist_tensorlist) + LOAD_SYMBOL(_lantern__foreach_zero_tensorlist) + LOAD_SYMBOL(_lantern__foreach_copy_out_tensorlist_tensorlist_tensorlist_bool) LOAD_SYMBOL(_lantern_bucketize_out_tensor_scalar_tensor_bool_bool) - LOAD_SYMBOL(_lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor) LOAD_SYMBOL(_lantern_glu_jvp_out_tensor_tensor_tensor_tensor_intt) LOAD_SYMBOL(_lantern_glu_backward_jvp_out_tensor_tensor_tensor_tensor_tensor_tensor_intt) LOAD_SYMBOL(_lantern_hardswish_backward_out_tensor_tensor_tensor) @@ -13726,13 +14186,21 @@ LOAD_SYMBOL(_lantern_buffer_from_tensor); LOAD_SYMBOL(_lantern__native_multi_head_attention_out_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_bool_bool_intt) LOAD_SYMBOL(_lantern__triton_scaled_dot_attention_out_tensor_tensor_tensor_tensor_double) LOAD_SYMBOL(_lantern__triton_multi_head_attention_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor) - LOAD_SYMBOL(_lantern__transformer_decoder_only_layer_fwd_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor) - LOAD_SYMBOL(_lantern__native_decoder_only_multi_head_attention_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool) LOAD_SYMBOL(_lantern__foobar_out_tensor_tensor_bool_bool_bool) LOAD_SYMBOL(_lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor) 
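// The optimizer hunk continuing below registers the new fused entry points:
// _fused_adam/_fused_adamw variants whose learning rate is a Tensor (the
// extra tensor in the suffix) plus _fused_sgd_ and _fused_adagrad_. A minimal
// sketch of the fused SGD step matching the
// tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor
// suffix, assuming libtorch >= 2.3; the hyperparameters are illustrative.
#include <torch/torch.h>

void fused_sgd_step(std::vector<torch::Tensor>& params,
                    std::vector<torch::Tensor>& grads,
                    std::vector<torch::Tensor>& momentum_bufs) {
  // A single fused kernel updates every parameter in the list in place.
  at::_fused_sgd_(params, grads, momentum_bufs,
                  /*weight_decay=*/0.0, /*momentum=*/0.9, /*lr=*/0.01,
                  /*dampening=*/0.0, /*nesterov=*/false, /*maximize=*/false,
                  /*is_first_step=*/false,
                  /*grad_scale=*/{}, /*found_inf=*/{});
}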
LOAD_SYMBOL(_lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor) LOAD_SYMBOL(_lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor) LOAD_SYMBOL(_lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adagrad_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor) + LOAD_SYMBOL(_lantern__fused_adagrad_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor) /* Autogen Symbols -- End */ return true; diff --git a/src/lantern/include/lantern/types.h b/src/lantern/include/lantern/types.h index 111aabfff8..9f606c9b95 100644 --- a/src/lantern/include/lantern/types.h +++ b/src/lantern/include/lantern/types.h @@ -1,3 +1,4 @@ +#include #include // https://pt.stackoverflow.com/a/438284/6036 @@ -156,6 +157,7 @@ LANTERN_FROM_RAW_DECL(SymInt, c10::SymInt) LANTERN_FROM_RAW_DECL(SymIntArrayRef, c10::SymIntArrayRef) LANTERN_FROM_RAW_DECL(FunctionSchema, c10::FunctionSchema) LANTERN_FROM_RAW_DECL(Argument, c10::Argument) +LANTERN_FROM_RAW_DECL(DeviceIndex, at::DeviceIndex) namespace optional { LANTERN_FROM_RAW_DECL(DimnameList, c10::optional) @@ -589,6 +591,7 @@ LANTERN_FROM_RAW(SymInt, c10::SymInt) LANTERN_FROM_RAW_WRAPPED(SymIntArrayRef, self_contained::SymIntArrayRef, c10::SymIntArrayRef) LANTERN_FROM_RAW(FunctionSchema, c10::FunctionSchema) LANTERN_FROM_RAW(Argument, c10::Argument) +LANTERN_FROM_RAW(DeviceIndex, at::DeviceIndex) namespace optional { LANTERN_FROM_RAW_WRAPPED(DimnameList, self_contained::optional::DimnameList, diff --git a/src/lantern/src/Allocator.cpp b/src/lantern/src/Allocator.cpp index b925cea871..0b497ca43c 100644 --- a/src/lantern/src/Allocator.cpp +++ b/src/lantern/src/Allocator.cpp @@ -40,7 +40,7 @@ namespace c10 { struct LanternCPUAllocator final : at::Allocator { LanternCPUAllocator() {} ~LanternCPUAllocator() override {} - at::DataPtr allocate(size_t nbytes) const override { + at::DataPtr allocate(size_t nbytes) override { void* data; { @@ -90,6 +90,10 @@ struct LanternCPUAllocator final : 
at::Allocator {
     free_cpu(ptr);
   }
 
+  void copy_data(void* dest, const void* src, std::size_t count) const override {
+    default_copy_data(dest, src, count);
+  }
+
   at::DeleterFnPtr raw_deleter() const override { return &ReportAndDelete; }
 };
 
diff --git a/src/lantern/src/Amp.cpp b/src/lantern/src/Amp.cpp
index 1586d72a23..03082be29b 100644
--- a/src/lantern/src/Amp.cpp
+++ b/src/lantern/src/Amp.cpp
@@ -1,3 +1,5 @@
+#include
+#include
 #define LANTERN_BUILD
 #include
 #include
@@ -6,39 +8,39 @@
 bool _lantern_amp_is_autocast_gpu_enabled() {
   LANTERN_FUNCTION_START
-  return at::autocast::is_enabled();
+  return at::autocast::is_autocast_enabled(torch::DeviceType::CUDA);
   LANTERN_FUNCTION_END
 }
 
 bool _lantern_amp_is_autocast_cpu_enabled() {
   LANTERN_FUNCTION_START
-  return at::autocast::is_cpu_enabled();
+  return at::autocast::is_autocast_enabled(torch::DeviceType::CPU);
   LANTERN_FUNCTION_END
 }
 
 void _lantern_amp_autocast_set_gpu_enabled(bool enabled) {
   LANTERN_FUNCTION_START
-  at::autocast::set_enabled(enabled);
+  at::autocast::set_autocast_enabled(torch::DeviceType::CUDA, enabled);
   LANTERN_FUNCTION_END_VOID
 }
 
 void _lantern_amp_autocast_set_cpu_enabled(bool enabled) {
   LANTERN_FUNCTION_START
-  at::autocast::set_cpu_enabled(enabled);
+  at::autocast::set_autocast_enabled(torch::DeviceType::CPU, enabled);
   LANTERN_FUNCTION_END_VOID
 }
 
 void _lantern_amp_autocast_set_gpu_dtype (void* dtype) {
   LANTERN_FUNCTION_START
   auto scalar_type = from_raw::ScalarType(dtype);
-  at::autocast::set_autocast_gpu_dtype(scalar_type);
+  at::autocast::set_autocast_dtype(torch::DeviceType::CUDA, scalar_type);
   LANTERN_FUNCTION_END_VOID
 }
 
 void _lantern_amp_autocast_set_cpu_dtype (void* dtype) {
   LANTERN_FUNCTION_START
   auto scalar_type = from_raw::ScalarType(dtype);
-  at::autocast::set_autocast_cpu_dtype(scalar_type);
+  at::autocast::set_autocast_dtype(torch::DeviceType::CPU, scalar_type);
   LANTERN_FUNCTION_END_VOID
 }
 
@@ -56,16 +58,16 @@ bool _lantern_amp_autocast_is_cache_enabled() {
 
 void* _lantern_amp_autocast_get_gpu_dtype () {
   LANTERN_FUNCTION_START
-  auto scalar_type = at::autocast::get_autocast_gpu_dtype();
+  auto scalar_type = at::autocast::get_autocast_dtype(torch::DeviceType::CUDA);
   return make_raw::ScalarType(scalar_type);
-  LANTERN_FUNCTION_END_VOID
+  LANTERN_FUNCTION_END
 }
 
 void* _lantern_amp_autocast_get_cpu_dtype () {
   LANTERN_FUNCTION_START
-  auto scalar_type = at::autocast::get_autocast_cpu_dtype();
+  auto scalar_type = at::autocast::get_autocast_dtype(torch::DeviceType::CPU);
   return make_raw::ScalarType(scalar_type);
-  LANTERN_FUNCTION_END_VOID
+  LANTERN_FUNCTION_END
 }
 
 void _lantern_amp_autocast_increment_nesting () {
diff --git a/src/lantern/src/Function.cpp b/src/lantern/src/Function.cpp
index 4eaaa94e80..6e675e816f 100644
--- a/src/lantern/src/Function.cpp
+++ b/src/lantern/src/Function.cpp
@@ -1,7 +1,9 @@
 #include "Function.h"
 
+#include
 #include
 #include
+#include
 
 #include "Autograd.h"
 #include "lantern/lantern.h"
@@ -64,13 +66,18 @@ variable_list LanternFunction::apply(variable_list args, void *forward_,
     LANTERN_ERROR_HANDLE
   }
 
+  const std::unordered_set set;
+
   auto wrapped_outputs = _wrap_outputs(
       args, node->ctx_.get_non_differentiable(), node->ctx_.get_dirty(),
       to_optional(outputs), is_executable ? node : nullptr,
       // TODO: not sure what this function should actually do. Seems to be
       // related to functorch & co. Hopefully it's not used by the current
      // code.
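(A note on the Amp.cpp hunk above, before the Function.cpp hunk continues below: libtorch folded the per-device autocast entry points, `is_enabled()`, `is_cpu_enabled()`, `set_autocast_gpu_dtype()` and friends, into device-generic overloads keyed by `DeviceType`, and the rewritten `_lantern_amp_*` wrappers now call those. The following sketch is illustrative only and not part of the patch; it assumes a libtorch new enough to ship the device-generic API, i.e. the same calls the hunk uses, and the guard name is made up for the example.)

    #include <ATen/autocast_mode.h>
    #include <torch/torch.h>

    // Scoped CUDA autocast built on only the calls visible in the hunk above.
    // For brevity the guard restores just the enabled flag, not the dtype.
    struct AutocastCudaGuard {
      bool prev_enabled =
          at::autocast::is_autocast_enabled(torch::DeviceType::CUDA);
      explicit AutocastCudaGuard(at::ScalarType dtype = at::kHalf) {
        at::autocast::set_autocast_enabled(torch::DeviceType::CUDA, true);
        at::autocast::set_autocast_dtype(torch::DeviceType::CUDA, dtype);
      }
      ~AutocastCudaGuard() {
        at::autocast::set_autocast_enabled(torch::DeviceType::CUDA,
                                           prev_enabled);
      }
    };

(Usage: `{ AutocastCudaGuard g; output = model->forward(input); }` runs the forward pass under CUDA autocast and restores the previous state on scope exit.)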
- [](variable_list x, variable_list y) { return x; }); + [](variable_list x, variable_list y) { return x; }, + set, + [](torch::Tensor x) {return x;} + ); node->output_info_.reserve(wrapped_outputs.size()); for (auto &output : wrapped_outputs) { diff --git a/src/lantern/src/Save.cpp b/src/lantern/src/Save.cpp index 7a38fdde85..1e950e9604 100644 --- a/src/lantern/src/Save.cpp +++ b/src/lantern/src/Save.cpp @@ -1,12 +1,15 @@ #include +#include #define LANTERN_BUILD #include +#include #include "base64.hpp" #include "lantern/lantern.h" #include "utils.hpp" +#include "Unpickler.h" std::string size_t_to_string(std::size_t i) { std::stringstream ss; @@ -67,10 +70,6 @@ void _lantern_test_print(void* x) { LANTERN_FUNCTION_END_VOID } -namespace torch::jit { -IValue pickle_load2(const std::vector& data); -IValue pickle_load3(std::string path); -} void* _lantern_load_state_dict(void* path, bool legacy_stream) { LANTERN_FUNCTION_START auto path_ = from_raw::string(path); @@ -80,9 +79,10 @@ void* _lantern_load_state_dict(void* path, bool legacy_stream) { std::ifstream file(path_.c_str(), std::ios::binary); std::vector data((std::istreambuf_iterator(file)), std::istreambuf_iterator()); - ivalue = torch::jit::pickle_load2(data); + ivalue = torch::pickle_load(data); } else { - ivalue = torch::jit::pickle_load3(path_); + caffe2::serialize::PyTorchStreamReader reader(path_); + ivalue = torch::jit::lantern_read_pickle("data", reader); } return make_raw::IValue(ivalue); diff --git a/src/lantern/src/Tensor.cpp b/src/lantern/src/Tensor.cpp index 6e26e4f16a..451e5c1df8 100644 --- a/src/lantern/src/Tensor.cpp +++ b/src/lantern/src/Tensor.cpp @@ -353,18 +353,35 @@ struct NoopPyInterpreterVTable final : public PyInterpreterVTable { } // Just swallow the event, don't do anything - void trace_gpu_event_creation(uintptr_t event) const override {} - void trace_gpu_event_deletion(uintptr_t event) const override {} - void trace_gpu_event_record(uintptr_t event, uintptr_t stream) + void trace_gpu_event_creation(c10::DeviceType device_type, uintptr_t event) const override {} + void trace_gpu_event_deletion(c10::DeviceType device_type, uintptr_t event) const override {} + void trace_gpu_event_record(c10::DeviceType device_type, uintptr_t event, uintptr_t stream) const override {} - void trace_gpu_event_wait(uintptr_t event, uintptr_t stream) const override {} - void trace_gpu_memory_allocation(uintptr_t ptr) const override {} - void trace_gpu_memory_deallocation(uintptr_t ptr) const override {} - void trace_gpu_stream_creation(uintptr_t stream) const override {} - void trace_gpu_device_synchronization() const override {} - void trace_gpu_stream_synchronization(uintptr_t stream) const override {} - void trace_gpu_event_synchronization(uintptr_t event) const override {} - + void trace_gpu_event_wait(c10::DeviceType device_type, uintptr_t event, uintptr_t stream) const override {} + void trace_gpu_memory_allocation(c10::DeviceType device_type, uintptr_t ptr) const override {} + void trace_gpu_memory_deallocation(c10::DeviceType device_type, uintptr_t ptr) const override {} + void trace_gpu_stream_creation(c10::DeviceType device_type, uintptr_t stream) const override {} + void trace_gpu_device_synchronization(c10::DeviceType device_type) const override {} + void trace_gpu_stream_synchronization(c10::DeviceType device_type, uintptr_t stream) const override {} + void trace_gpu_event_synchronization(c10::DeviceType device_type, uintptr_t event) const override {} + + + void incref(PyObject* pyobj) const override {} + void 
reportErrorCallback(PyObject* callback, DispatchKey key) const override {} + void python_op_registration_trampoline(const c10::OperatorHandle& op, + c10::DispatchKey, + c10::DispatchKeySet keyset, + torch::jit::Stack* stack, + bool with_keyset, + bool with_op) const override {} + void throw_abstract_impl_not_imported_error(std::string opname, + const char* pymodule, + const char* context) const override {} + int64_t numel(const TensorImpl* self) const override { + return self->numel(); + } + void reset_backward_hooks(const TensorImpl* self) const override {} + }; } // namespace impl diff --git a/src/lantern/src/Pickler.cpp b/src/lantern/src/Unpickler.cpp similarity index 77% rename from src/lantern/src/Pickler.cpp rename to src/lantern/src/Unpickler.cpp index e4bf753b74..da5e616255 100644 --- a/src/lantern/src/Pickler.cpp +++ b/src/lantern/src/Unpickler.cpp @@ -1,5 +1,10 @@ #include #include +#include +#include +#include +#include +#include #ifdef USE_RPC #include #endif @@ -8,106 +13,32 @@ #include #include #include +#include #include - -#ifdef _WIN32 -#define LANTERN_API __declspec(dllexport) -#else -#define LANTERN_API -#endif +#include +#include +#include +#include +#include +#include +#include namespace torch { namespace jit { -// See Python's pickletools.py for a detailed description of each of these codes -enum class PickleOpCode2 : char { - MARK = '(', - STOP = '.', - POP = '0', - POP_MARK = '1', - DUP = '2', - FLOAT = 'F', - INT = 'I', - BININT = 'J', - BININT1 = 'K', - LONG = 'L', - BININT2 = 'M', - NONE = 'N', - PERSID = 'P', - BINPERSID = 'Q', - REDUCE = 'R', - STRING = 'S', - BINSTRING = 'T', - SHORT_BINSTRING = 'U', - // NB: Avoid using UNICODE as it is a macro in the Windows API - UNICODE_ = 'V', - BINUNICODE = 'X', - APPEND = 'a', - BUILD = 'b', - GLOBAL = 'c', - DICT = 'd', - EMPTY_DICT = '}', - APPENDS = 'e', - GET = 'g', - BINGET = 'h', - INST = 'i', - LONG_BINGET = 'j', - LIST = 'l', - EMPTY_LIST = ']', - OBJ = 'o', - PUT = 'p', - BINPUT = 'q', - LONG_BINPUT = 'r', - SETITEM = 's', - TUPLE = 't', - EMPTY_TUPLE = ')', - SETITEMS = 'u', - BINFLOAT = 'G', - - // Protocol 2 - PROTO = char('\x80'), - NEWOBJ = '\x81', - EXT1 = '\x82', - EXT2 = '\x83', - EXT4 = '\x84', - TUPLE1 = '\x85', - TUPLE2 = '\x86', - TUPLE3 = '\x87', - NEWTRUE = '\x88', - NEWFALSE = '\x89', - LONG1 = '\x8a', - LONG4 = '\x8b', - - // Protocol 3 (Python 3.x) - BINBYTES = 'B', - SHORT_BINBYTES = 'C', - - // Protocol 4 - SHORT_BINUNICODE = char('\x8c'), - BINUNICODE8 = '\x8d', - BINBYTES8 = '\x8e', - EMPTY_SET = '\x8f', - ADDITEMS = '\x90', - FROZENSET = '\x91', - NEWOBJ_EX = '\x92', - STACK_GLOBAL = '\x93', - MEMOIZE = '\x94', - FRAME = '\x95' -}; - using TypeResolver = std::function; using ObjLoader = std::function< - c10::intrusive_ptr(at::StrongTypePtr, IValue)>; + c10::intrusive_ptr(const at::StrongTypePtr&, IValue)>; class DeserializationStorageContext; -// [unpickler refactor] there is some cruft around PickleOpCode2::BUILD, -// PickleOpCode2::NEWOBJ, and the last_opcode_ member below that should be +// [unpickler refactor] there is some cruft around PickleOpCode::BUILD, +// PickleOpCode::NEWOBJ, and the last_opcode_ member below that should be // deleted at some point, the Pickler doesn't produce it and it's only around to // support models saved before 1.1 -class LANTERN_API LanternUnpickler { +class LanternUnpickler { AT_DISALLOW_COPY_AND_ASSIGN(LanternUnpickler); using TypeParserT = c10::TypePtr (*)(const std::string&); @@ -131,6 +62,21 @@ class LANTERN_API LanternUnpickler { 
type_parser_(type_parser),
+        version_(caffe2::serialize::kProducedFileFormatVersion) {}
+
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+  LanternUnpickler(
+      std::function reader,
+      TypeResolver type_resolver,
+      c10::ArrayRef tensor_table,
+      ObjLoader obj_loader,
+      TypeParserT type_parser = defaultTypeParser)
+      : reader_(std::move(reader)),
+        tensor_table_(tensor_table),
+        type_resolver_(std::move(type_resolver)),
+        obj_loader_(std::move(obj_loader)),
+        use_storage_device_(false),
+        type_parser_(type_parser),
+        version_(caffe2::serialize::kProducedFileFormatVersion) {}
+
   // tensors inside the pickle contain meta-data, the raw tensor
   // data is retrieved by calling `read_record`.
   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
@@ -139,7 +85,7 @@ class LANTERN_API LanternUnpickler {
       TypeResolver type_resolver,
       ObjLoader obj_loader,
       std::function read_record,
-      c10::optional device,
+      std::optional device,
       bool use_storage_device = false,
       TypeParserT type_parser = defaultTypeParser,
       std::shared_ptr storage_context = nullptr)
@@ -148,8 +94,7 @@
         type_resolver_(std::move(type_resolver)),
         obj_loader_(std::move(obj_loader)),
         read_record_(std::move(read_record)),
-        // NOLINTNEXTLINE(performance-move-const-arg)
-        device_(std::move(device)),
+        device_(device),
         use_storage_device_(use_storage_device),
         type_parser_(type_parser),
         storage_context_(std::move(storage_context)),
@@ -205,16 +150,18 @@
       const std::string& module_name,
       const std::string& class_name);
   void rebuildTensor(bool quantized);
+  void rebuildTensorFromTypeV2();
   void rebuildSparseTensor();
 #ifdef USE_DISTRIBUTED
   void rebuildRRef();
 #endif
-  PickleOpCode2 readInstruction();
-  PickleOpCode2 readOpCode() {
-    return static_cast(read());
+  PickleOpCode readInstruction();
+  PickleOpCode readOpCode() {
+    return static_cast(read());
   }
   std::string readString();
   void readList(IValue list_ivalue);
+  void readListElements(IValue list_ivalue, size_t start);
   void setInput(size_t memo_id);
   void run();
@@ -247,7 +194,7 @@
   IValue empty_tuple_;
 
   std::function read_record_;
-  c10::optional device_;
+  std::optional device_;
   // When set to true, Unpickler will ignore the pickled device and use the
   // device of the DataPtr returned by the read_record_ function. The default
   // value of this flag is false.
@@ -261,18 +208,10 @@
   // See [type tag serialization]
   uint64_t version_;
-};
-
-void restoreAccurateTypeTags(const IValue& root, const c10::TypePtr& type_tag);
-
-} // namespace jit
-} // namespace torch
-
-
-namespace torch {
-namespace jit {
-using ::c10::IValue;
+
+  // See [NOTE] skip_next_read_global
+  uint8_t skip_next_read_global = 0;
+};
 
 static void restoreAccurateTypeTagsIfPossible(const IValue& root) {
   if (root.isObject()) {
@@ -347,13 +286,18 @@ void restoreAccurateTypeTags(const IValue& root, const TypePtr& type_tag) {
       // no op, there is nothing to tag
       break;
     case c10::SymIntType::Kind:
-      TORCH_CHECK(!w.value.toSymInt().is_symbolic());
+      // TODO: Can this really show up though? 
:think: + TORCH_CHECK(!w.value.toSymInt().is_heap_allocated()); // no op, there is nothing to tag break; case c10::SymFloatType::Kind: TORCH_CHECK(!w.value.toSymFloat().is_symbolic()); // no op, there is nothing to tag break; + case c10::SymBoolType::Kind: + TORCH_CHECK(!w.value.toSymBool().is_heap_allocated()); + // no op, there is nothing to tag + break; case DynamicType::Kind: case UnionType::Kind: case EnumType::Kind: @@ -373,6 +317,13 @@ void restoreAccurateTypeTags(const IValue& root, const TypePtr& type_tag) { to_process.emplace_back(std::move(elem)); } } break; + case AwaitType::Kind: { + auto aw = w.value.toAwait(); + if (aw->completed()) { + Work elem = {w.type->containedType(0), aw->wait()}; + to_process.emplace_back(std::move(elem)); + } + } break; case OptionalType::Kind: { if (!w.value.isNone()) { Work elem = {w.type->containedType(0), w.value}; @@ -388,7 +339,7 @@ void restoreAccurateTypeTags(const IValue& root, const TypePtr& type_tag) { auto elem_type = w.type->containedType(0); auto lst = w.value.toList(); lst.unsafeSetElementType(elem_type); - for (const IValue item : lst) { + for (const IValue& item : lst) { Work elem = {elem_type, item}; to_process.emplace_back(std::move(elem)); } @@ -422,6 +373,49 @@ void restoreAccurateTypeTags(const IValue& root, const TypePtr& type_tag) { } } +IValue lantern_read_pickle( + const std::string& archive_name, + caffe2::serialize::PyTorchStreamReader& stream_reader) { + std::string picklename = archive_name + ".pkl"; + at::DataPtr pickle_ptr; + size_t pickle_size = 0; + std::tie(pickle_ptr, pickle_size) = stream_reader.getRecord(picklename); + + size_t bytes_read = 0; + auto data = reinterpret_cast(pickle_ptr.get()); + auto reader = [&](char* buffer, size_t len) -> size_t { + if (bytes_read >= pickle_size) { + return 0; + } + len = std::min(pickle_size - bytes_read, len); + // Copy len bytes into buffer + const char* start = data + bytes_read; + std::memcpy(buffer, start, len); + bytes_read += len; + return len; + }; + + std::string tensor_dir_path = archive_name + "/"; + + auto read_record = [&](const std::string& name) { + std::string ss = tensor_dir_path + name; + return std::get<0>(stream_reader.getRecord(ss)); + }; + + LanternUnpickler unpickler( + reader, + nullptr, + nullptr, + std::move(read_record), + c10::nullopt, + false, + LanternUnpickler::defaultTypeParser, + std::move(nullptr)); + unpickler.set_version(stream_reader.version()); + return unpickler.parse_ivalue(); +} + + namespace { template bool is(const Type& type) { @@ -435,7 +429,9 @@ bool is(const Type& type) { } } // namespace -void restoreContainerTypeTags(const IValue& ivalue, const TypePtr& type) { +static void restoreContainerTypeTags( + const IValue& ivalue, + const TypePtr& type) { if (is(*type)) { auto dict = ivalue.toGenericDict(); dict.unsafeSetKeyType(type->containedType(0)); @@ -443,404 +439,69 @@ void restoreContainerTypeTags(const IValue& ivalue, const TypePtr& type) { } else if (is(*type)) { ivalue.toList().unsafeSetElementType(type->containedType(0)); } else { - AT_ERROR("Unknown type for tag restoration: " + type->annotation_str()); - } -} - -IValue LanternUnpickler::parse_ivalue() { - run(); - TORCH_CHECK( - stack_.size() == 1, - "Unpickler expected 1 element on the stack, but found ", - stack_.size()); - if (version_ <= 2) { - // See [type tag serialization] - restoreAccurateTypeTagsIfPossible(stack_[0]); + TORCH_CHECK( + false, "Unknown type for tag restoration: " + type->annotation_str()); } - return stack_[0]; } -double 
LanternUnpickler::readFloat() { - AT_ASSERT(sizeof(double) == 8); - double big_endian = read(); - // NOLINTNEXTLINE(cppcoreguidelines-init-variables) - double little_endian; - - // Pickle floats are big endian, so reverse the bytes - auto big_endian_ptr = reinterpret_cast(&big_endian); - std::reverse_copy( - big_endian_ptr, - big_endian_ptr + sizeof(big_endian), - reinterpret_cast(&little_endian)); - - return little_endian; +inline bool is_valid_python_id_char(char c) { + return c == '_' || c == '.' || (c >= '0' && c <= '9') || + (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); } -void LanternUnpickler::run() { - // Expect a PROTO opcode and protocol number at the start of blob - auto opcode = readOpCode(); - TORCH_CHECK( - opcode == PickleOpCode2::PROTO, - "Expected PROTO opcode at the start" - " of pickle archive, found ", - int(static_cast(opcode))); - uint8_t protocol = read(); - TORCH_CHECK( - protocol == 2, - "Only Pickle protocol 2 is supported, found protocol = ", - protocol); - +std::string LanternUnpickler::readString() { + std::string ss; while (true) { - PickleOpCode2 opcode = readInstruction(); - if (opcode == PickleOpCode2::STOP) { - return; + auto* const bufferStart = buffer_.data() + buffer_pos_; + const auto bufferLeft = buffer_.size() - buffer_pos_; + char* const newlinePtr = + static_cast(memchr(bufferStart, '\n', bufferLeft)); + if (newlinePtr) { + // read up to newline and we are done. + auto const charsRead = newlinePtr - bufferStart; + ss.append(bufferStart, charsRead); + buffer_remaining_ -= charsRead + 1; + buffer_pos_ += charsRead + 1; + break; + } else { + // read whole buffer, refill + for (const char* p = bufferStart; p < bufferStart + bufferLeft; ++p) { + // Simple check just in case there is no terminating '\n' + TORCH_CHECK( + is_valid_python_id_char(*p), + "Found character '", + int(uint8_t(*p)), + "' in string, ", + "strings must be qualified Python identifiers"); + } + ss.append(bufferStart, bufferLeft); + buffer_remaining_ = reader_(buffer_.data(), buffer_.size()); + buffer_pos_ = 0; } } -} -void LanternUnpickler::setInput(size_t memo_id) { - AT_ASSERT(!stack_.empty()); - if (memo_id >= memo_table_.size()) { - memo_table_.insert( - memo_table_.end(), memo_id - memo_table_.size(), IValue()); - memo_table_.push_back(stack_.back()); - } else { - memo_table_[memo_id] = stack_.back(); - } -} - -// emplace_back on bool vectors does not exist on some systems -// avoid it by calling push_back for bool -template -inline void append(std::vector& a, T&& e) { - a.emplace_back(std::forward(e)); -} -template <> -inline void append(std::vector& a, bool&& e) { - a.push_back(e); -} - -static std::vector tupleToIntList(const IValue& v) { - return fmap(v.toTupleRef().elements(), [](const IValue& v) -> int64_t { - return v.toInt(); - }); -} - -// note we cannot use toIntList, toDoubleList because during unpickling the -// lists are not yet tagged -template -static std::vector convertList(const IValue& v) { - return fmap(v.toListRef(), [](const IValue& elem) { return elem.to(); }); + return ss; } -PickleOpCode2 LanternUnpickler::readInstruction() { - auto opcode = readOpCode(); - switch (opcode) { - case PickleOpCode2::EMPTY_LIST: { - stack_.emplace_back(c10::impl::GenericList(AnyType::get())); - } break; - case PickleOpCode2::EMPTY_TUPLE: { - if (empty_tuple_.isNone()) { - // we only need one object, since tuples are not mutable. 
- empty_tuple_ = c10::ivalue::Tuple::create(std::vector()); - } - stack_.emplace_back(empty_tuple_); - } break; - case PickleOpCode2::BINPUT: { - size_t memo_id = read(); - setInput(memo_id); - } break; - case PickleOpCode2::LONG_BINPUT: { - TORCH_CHECK( - std::numeric_limits::max() >= - std::numeric_limits::max(), - "Found a LONG_BINPUT opcode, but size_t on this system is " - "not big enough to decode it"); - size_t memo_id = read(); - setInput(memo_id); - } break; - case PickleOpCode2::MARK: { - // Mark location of the container ivalue in the stack - marks_.push_back(stack_.size()); - } break; - case PickleOpCode2::NEWTRUE: { - stack_.emplace_back(true); - } break; - case PickleOpCode2::NEWFALSE: { - stack_.emplace_back(false); - } break; - case PickleOpCode2::NONE: { - stack_.emplace_back(IValue()); - } break; - case PickleOpCode2::BININT1: { - uint8_t value = read(); - stack_.emplace_back(int64_t(value)); - } break; - case PickleOpCode2::BININT2: { - uint16_t value = read(); - stack_.emplace_back(int64_t(value)); - } break; - case PickleOpCode2::BININT: { - int32_t value = read(); - stack_.emplace_back(int64_t(value)); - } break; - case PickleOpCode2::LONG1: { - // Only read LONG1s with 8 as the length - uint8_t length = read(); - TORCH_CHECK(length == 8, "Expected length to be 8, got ", int(length)); - stack_.emplace_back(int64_t(read())); - } break; - case PickleOpCode2::BINUNICODE: { - uint32_t length = read(); - stack_.emplace_back(readBytes(length)); - } break; - case PickleOpCode2::BINFLOAT: - stack_.emplace_back(readFloat()); - break; - case PickleOpCode2::TUPLE: { - TORCH_CHECK(!marks_.empty(), "Parsing error: marks_ is empty"); - size_t start = marks_.back(); - marks_.pop_back(); - std::vector elements; - const auto tupleSize = stack_.size() - start; - switch (tupleSize) { - case 3: { - auto e3 = pop(stack_); - auto e2 = pop(stack_); - auto e1 = pop(stack_); - stack_.emplace_back(c10::ivalue::Tuple::create( - std::move(e1), std::move(e2), std::move(e3))); - break; - } - case 2: { - auto e2 = pop(stack_); - auto e1 = pop(stack_); - stack_.emplace_back( - c10::ivalue::Tuple::create(std::move(e1), std::move(e2))); - break; - } - case 1: - stack_.emplace_back(c10::ivalue::Tuple::create(pop(stack_))); - break; - default: { - elements.reserve(stack_.size() - start); - auto start_it = stack_.begin() + start; - for (auto it = start_it; it != stack_.end(); ++it) { - elements.emplace_back(std::move(*it)); - } - stack_.erase(start_it, stack_.end()); - stack_.emplace_back(c10::ivalue::Tuple::create(std::move(elements))); - break; - } +void LanternUnpickler::readGlobal( + const std::string& module_name, + const std::string& class_name) { + if (this->skip_next_read_global) { + // See [NOTE] skip_next_read_global + this->skip_next_read_global--; + if (this->skip_next_read_global == 1) { + // Pass through to the correct handler + } else if (this->skip_next_read_global == 0) { + // Corresponds to the type of `Tensor` being unpickled + if (module_name != "torch" || class_name != "Tensor") { + TORCH_WARN( + "Trying to load a Subclassed Tensor, it will be converted to at::Tensor in C++"); } - } break; - case PickleOpCode2::TUPLE1: { - stack_.emplace_back(c10::ivalue::Tuple::create(pop(stack_))); - } break; - case PickleOpCode2::TUPLE2: { - auto e2 = pop(stack_); - auto e1 = pop(stack_); - stack_.emplace_back( - c10::ivalue::Tuple::create(std::move(e1), std::move(e2))); - } break; - case PickleOpCode2::TUPLE3: { - auto e3 = pop(stack_); - auto e2 = pop(stack_); - auto e1 = pop(stack_); - 
stack_.emplace_back(c10::ivalue::Tuple::create( - std::move(e1), std::move(e2), std::move(e3))); - } break; - case PickleOpCode2::EMPTY_DICT: - stack_.emplace_back( - c10::impl::GenericDict(AnyType::get(), AnyType::get())); - break; - case PickleOpCode2::APPENDS: { - TORCH_CHECK(!marks_.empty(), "Parsing error: marks_ is empty"); - size_t start = marks_.back(); - TORCH_CHECK( - start > 0 && start <= stack_.size(), - "Parsing error: wrong start index for stack_"); - auto list_ivalue = stack_.at(start - 1); - readList(list_ivalue); - } break; - case PickleOpCode2::LIST: { - IValue list_ivalue = c10::impl::GenericList(AnyType::get()); - readList(list_ivalue); - stack_.push_back(std::move(list_ivalue)); - } break; - case PickleOpCode2::DICT: { - TORCH_CHECK(!marks_.empty(), "Parsing error: marks_ is empty"); - size_t start = marks_.back(); - marks_.pop_back(); - auto dict = c10::impl::GenericDict(AnyType::get(), AnyType::get()); - for (size_t i = start; i < stack_.size(); i += 2) { - dict.insert_or_assign(stack_[i], stack_[i + 1]); - } - stack_.erase(stack_.begin() + start, stack_.end()); - stack_.emplace_back(std::move(dict)); - } break; - case PickleOpCode2::SETITEMS: { - TORCH_CHECK(!marks_.empty(), "Parsing error: marks_ is empty"); - size_t start = marks_.back(); - marks_.pop_back(); - TORCH_CHECK( - start > 0 && start <= stack_.size(), - "Parsing error: wrong start index for stack_"); - auto dict = stack_.at(start - 1).toGenericDict(); - for (size_t i = start; i < stack_.size(); i += 2) { - dict.insert_or_assign(stack_[i], stack_[i + 1]); - } - stack_.erase(stack_.begin() + start, stack_.end()); - } break; - case PickleOpCode2::BINGET: { - stack_.push_back(memo_table_.at(read())); - } break; - case PickleOpCode2::LONG_BINGET: { - stack_.push_back(memo_table_.at(read())); - } break; - case PickleOpCode2::STOP: - break; - case PickleOpCode2::GLOBAL: { - // Module name, it's not needed for anything - auto module_name = readString(); - auto class_name = readString(); - readGlobal(module_name, class_name); - } break; - case PickleOpCode2::NEWOBJ: { - // pop empty tuple, the actual action is stored in the globals_stack_ - stack_.pop_back(); - } break; - // because we have NEWOBJ do nothing, BUILD and REDUCE end up doing - // the same thing - case PickleOpCode2::BUILD: - case PickleOpCode2::REDUCE: { - // stack is: - // extract and remove from the stack: - - // In the OrderedDict case, the id has already been materialized - // and added to the stack, thus there's no but a Dict - // there, in this case we can just pop the functor args and break. - // The functor args in this case contain some other metadata like - // '{_metadata: {: {version: 1}}}' which seem to be safe to ignore. 
- if (stack_.at(stack_.size() - 2).isGenericDict()) { - stack_.pop_back(); - break; - } - - std::swap(*(stack_.end() - 2), *(stack_.end() - 1)); - size_t idx = stack_.back().toInt(); - stack_.pop_back(); - // stack is: - TORCH_CHECK( - idx < globals_.size(), - "Parsing error: out of bounds access to globals_"); - globals_.at(idx)(); - } break; - case PickleOpCode2::BINPERSID: { - auto tuple = pop(stack_).toTuple(); - const auto& args = tuple->elements(); - AT_ASSERT( - args.at(0).toStringRef() == "storage", - "unknown PERSID key ", - args.at(0).toStringRef()); - at::ScalarType type = args.at(1).toScalarType(); - const std::string& key = args.at(2).toStringRef(); - - at::Device device(args.at(3).toStringRef()); - if (device_) { - device = *device_; - } - - at::Storage storage; - if (storage_context_ != nullptr && storage_context_->hasStorage(key)) { - // for torch.package logic where storage may be loaded already - storage = storage_context_->getStorage(key); - } else { - int64_t numel = args.at(4).toInt(); - caffe2::TypeMeta dtype = at::CPU(type).typeMeta(); - - at::DataPtr storage_ptr; - if (numel > 0) { - // If there are no elements in the tensor, there's no point in - // reading a zero (0) byte file from the input stream and paying - // that cost. - storage_ptr = read_record_(key); - } - - storage = at::Storage( - c10::Storage::use_byte_size_t(), - numel * dtype.itemsize(), - std::move(storage_ptr), - /*allocator=*/nullptr, - /*resizable=*/false); // NB: we didn't set any allocator for the - // tensor - if (storage_context_ != nullptr) { - storage_context_->addStorage(key, storage); - } - } - - auto options = at::CPU(type).options(); - if (use_storage_device_) { - options = options.device(storage.device()); - device = storage.device(); - } - - at::Tensor tensor; - if (options.backend() == c10::Backend::QuantizedCPU) { - tensor = at::_empty_affine_quantized({}, options, 0, 0) - .set_(storage, 0, {}, {}); - } else { - tensor = at::empty({0}, options).set_(storage); - } - - if (device.is_cuda() || device.is_xpu() || device.is_meta() || - device.is_hpu()) { - tensor = tensor.to(device, tensor.scalar_type()); - } else if (device.type() != DeviceType::CPU) { - AT_ERROR( - "supported devices include CPU, CUDA and HPU, however got ", - DeviceTypeName(device.type(), false)); - } - stack_.emplace_back(std::move(tensor)); - } break; - case PickleOpCode2::SETITEM: { - // At this OpCode, stack looks like - // | Stack Bottom | - // | ...... 
| - // | Dict | -> (stack_size - 3) - // | Key | -> (stack_size - 2) - // | Value | -> (stack_size - 1) - TORCH_CHECK( - stack_.size() >= 3, - "Parsing error: stack doesn't have enough elements"); - - auto stack_size = stack_.size(); - auto dict_pos = stack_size - 3; - auto key_pos = stack_size - 2; - auto val_pos = stack_size - 1; - - TORCH_CHECK( - (dict_pos < stack_size) && (key_pos < stack_size) && - (val_pos < stack_size), - "Parsing error: attempted out-of-bounds access while processing SETITEM opcode"); - - auto dict = stack_.at(dict_pos).toGenericDict(); - dict.insert_or_assign(stack_.at(key_pos), stack_.at(val_pos)); - stack_.erase(stack_.begin() + (key_pos), stack_.end()); - } break; - default: { - AT_ERROR( - "Unknown opcode for unpickling at ", - reinterpret_cast(opcode), - ": ", - int(static_cast(opcode))); - } break; + stack_.emplace_back(int64_t(globals_.size() - 1)); + return; + } else { + TORCH_CHECK(false, "INVALID VALUES") + } } - return opcode; -} - -void LanternUnpickler::readGlobal( - const std::string& module_name, - const std::string& class_name) { // TODO [unpickler refactor] __main__ isn't used by the pickler anymore, this // is only here for bc-compatibility reasons if (module_name == "__main__") { @@ -858,7 +519,7 @@ void LanternUnpickler::readGlobal( stack_.back().toList().unsafeSetElementType(IntType::get()); }); } else { - AT_ERROR("Unknown pickler class id", class_name); + TORCH_CHECK(false, "Unknown pickler class id", class_name); } } else if (module_name == "torch.jit._pickle") { if (class_name == "build_tensor_from_id") { @@ -908,7 +569,7 @@ void LanternUnpickler::readGlobal( } else if (class_name == "build_boollist") { elem_type = BoolType::get(); } else { - AT_ERROR("Unknown pickler class id ", class_name); + TORCH_CHECK(false, "Unknown pickler class id ", class_name); } // Unpickle a list specialization (e.g. List[Tensor], List[int], ...) globals_.emplace_back([this, elem_type] { @@ -926,6 +587,12 @@ void LanternUnpickler::readGlobal( // Unpickle a tensor bool quantized = class_name == "_rebuild_qtensor"; rebuildTensor(quantized); + } else if ( + module_name == "torch._tensor" && + (class_name == "_rebuild_from_type_v2")) { + // Unpickle a Tensor with Python attributes or + // a Subclassed Tensor. + rebuildTensorFromTypeV2(); } else if ( module_name == "torch._utils" && class_name == "_rebuild_sparse_tensor") { rebuildSparseTensor(); @@ -942,11 +609,11 @@ void LanternUnpickler::readGlobal( } else if (module_name == "collections" && class_name == "OrderedDict") { // collections.OrderedDict is used in tensor serialization for a tensor's // backward hooks (but they are not actually saved with this Pickler) + // Python's model.state_dict() is an OrderedDict, but this is not used + // for model loading. globals_.emplace_back([this] { - // drop the Tuple that was argument to OrderedDict, and replace it - // with None OrderedDicts only appear in tensor deserialization and - // their value is never used - //stack_.back() = IValue(); + // The OrderedDict becomes a GenericDict. The inputs which are in + // stack.back() are fully ignored, but they are empty anyways. stack_.back() = c10::impl::GenericDict(AnyType::get(), AnyType::get()); }); } else if (module_name == "torch" && class_name == "device") { @@ -971,7 +638,7 @@ void LanternUnpickler::readGlobal( // like the other branches here because no REDUCE or BUILD will // be called on this value. 
Instead, we just put it on the stack // and return early - c10::optional scalar_type; + std::optional scalar_type; #define CHECK_SCALAR(_, name) \ if (class_name == #name "Storage") { \ scalar_type = c10::k##name; \ @@ -983,7 +650,7 @@ void LanternUnpickler::readGlobal( return; } - c10::optional qscheme; + std::optional qscheme; for (int i = 0; i < at::COMPILE_TIME_NUM_QSCHEMES; ++i) { if (class_name == toString(static_cast(i))) { qscheme = static_cast(i); @@ -1033,100 +700,461 @@ void LanternUnpickler::readGlobal( stack_.emplace_back(int64_t(globals_.size() - 1)); } -void LanternUnpickler::rebuildSparseTensor() { - globals_.emplace_back([this] { - auto tup = pop(stack_).toTuple(); - const auto& elements = tup->elements(); - size_t idx = 0; - auto layout = elements.at(idx++).toInt(); - at::Tensor result; - switch (layout) { - case static_cast(c10::Layout::Sparse): { - std::vector size = tupleToIntList(elements.at(idx++)); - bool requires_grad = elements.at(idx++).toBool(); - auto& indices_tensor = elements.at(idx++).toTensor(); - auto& values_tensor = elements.at(idx++).toTensor(); - auto options = values_tensor.options() - .layout(c10::Layout::Sparse) - .requires_grad(requires_grad); - result = at::_sparse_coo_tensor_unsafe( - indices_tensor, values_tensor, size, options); - result = autograd::make_variable(result, options.requires_grad()); - break; - } - case static_cast(c10::Layout::SparseCsr): { - std::vector size = tupleToIntList(elements.at(idx++)); - bool requires_grad = elements.at(idx++).toBool(); - auto& crow_indices = elements.at(idx++).toTensor(); - auto& col_indices = elements.at(idx++).toTensor(); - auto& values_tensor = elements.at(idx++).toTensor(); - auto options = values_tensor.options() - .layout(c10::Layout::SparseCsr) - .requires_grad(requires_grad); - result = at::_sparse_csr_tensor_unsafe( - crow_indices, col_indices, values_tensor, size, options); - result = - autograd::make_variable(std::move(result), options.requires_grad()); - break; +PickleOpCode LanternUnpickler::readInstruction() { + auto opcode = readOpCode(); + switch (opcode) { + case PickleOpCode::EMPTY_LIST: { + stack_.emplace_back(c10::impl::GenericList(AnyType::get())); + } break; + case PickleOpCode::EMPTY_TUPLE: { + if (empty_tuple_.isNone()) { + // we only need one object, since tuples are not mutable. 
+ empty_tuple_ = c10::ivalue::Tuple::create(std::vector()); } - default: - TORCH_CHECK( - false, - "Unsupported sparse tensor layout type in serialization ", - static_cast(layout)); - break; - } - stack_.emplace_back(std::move(result)); - }); -} - -void LanternUnpickler::rebuildTensor(bool quantized) { - globals_.emplace_back([this, quantized] { - auto tup = pop(stack_).toTuple(); - const auto& elements = tup->elements(); - size_t idx = 0; - auto& storage_tensor = elements.at(idx++).toTensor(); - int64_t storage_offset = elements.at(idx++).toInt(); - std::vector size = tupleToIntList(elements.at(idx++)); - std::vector stride = tupleToIntList(elements.at(idx++)); - at::Tensor result; - if (quantized) { - auto qparams_tuple = elements.at(idx++).toTuple(); - const auto& qparams = qparams_tuple->elements(); - auto qscheme = static_cast(qparams.at(0).toInt()); - switch (qscheme) { - case at::kPerTensorAffine: { - double q_scale = qparams.at(1).toDouble(); - int64_t q_zero_point = qparams.at(2).toInt(); - result = at::_empty_affine_quantized( - {0}, storage_tensor.options(), q_scale, q_zero_point); - } break; - case at::kPerChannelAffineFloatQParams: - case at::kPerChannelAffine: { - const auto& scales = qparams.at(1).toTensor(); - const auto& zero_points = qparams.at(2).toTensor(); - int64_t axis = qparams.at(3).toInt(); - result = at::_empty_per_channel_affine_quantized( - {0}, scales, zero_points, axis, storage_tensor.options()); - } break; - default: - TORCH_CHECK( - false, - "Unsupported tensor quantization type in serialization ", - toString(qscheme)); + stack_.emplace_back(empty_tuple_); + } break; + case PickleOpCode::BINPUT: { + size_t memo_id = read(); + setInput(memo_id); + } break; + case PickleOpCode::LONG_BINPUT: { + TORCH_CHECK( + std::numeric_limits::max() >= + std::numeric_limits::max(), + "Found a LONG_BINPUT opcode, but size_t on this system is " + "not big enough to decode it"); + size_t memo_id = read(); + setInput(memo_id); + } break; + case PickleOpCode::MARK: { + // Mark location of the container ivalue in the stack + marks_.push_back(stack_.size()); + } break; + case PickleOpCode::NEWTRUE: { + stack_.emplace_back(true); + } break; + case PickleOpCode::NEWFALSE: { + stack_.emplace_back(false); + } break; + case PickleOpCode::NONE: { + stack_.emplace_back(); + } break; + case PickleOpCode::BININT1: { + uint8_t value = read(); + stack_.emplace_back(int64_t(value)); + } break; + case PickleOpCode::BININT2: { + uint16_t value = from_le16(read()); + stack_.emplace_back(int64_t(value)); + } break; + case PickleOpCode::BININT: { + int32_t value = from_le32(read()); + stack_.emplace_back(int64_t(value)); + } break; + case PickleOpCode::LONG1: { + // Only read LONG1s with 8 as the length + uint8_t length = read(); + TORCH_CHECK(length == 8, "Expected length to be 8, got ", int(length)); + stack_.emplace_back(int64_t(from_le64(read()))); + } break; + case PickleOpCode::BINUNICODE: { + uint32_t length = from_le32(read()); + stack_.emplace_back(readBytes(length)); + } break; + case PickleOpCode::BINUNICODE8: { + int64_t length = from_le64(read()); + stack_.emplace_back(readBytes(length)); + } break; + case PickleOpCode::BINFLOAT: + stack_.emplace_back(readFloat()); + break; + case PickleOpCode::TUPLE: { + TORCH_CHECK(!marks_.empty(), "Parsing error: marks_ is empty"); + size_t start = marks_.back(); + marks_.pop_back(); + std::vector elements; + TORCH_CHECK( + stack_.size() >= start, + "Parsing error: wrong start index ", + start, + " for stack_ of size ", + stack_.size()); + const 
auto tupleSize = stack_.size() - start;
+      switch (tupleSize) {
+        case 3: {
+          auto e3 = pop(stack_);
+          auto e2 = pop(stack_);
+          auto e1 = pop(stack_);
+          stack_.emplace_back(c10::ivalue::Tuple::create(
+              std::move(e1), std::move(e2), std::move(e3)));
+          break;
+        }
+        case 2: {
+          auto e2 = pop(stack_);
+          auto e1 = pop(stack_);
+          stack_.emplace_back(
+              c10::ivalue::Tuple::create(std::move(e1), std::move(e2)));
+          break;
+        }
+        case 1:
+          stack_.emplace_back(c10::ivalue::Tuple::create(pop(stack_)));
+          break;
+        default: {
+          elements.reserve(stack_.size() - start);
+          auto start_it = stack_.begin() + static_cast(start);
+          for (auto it = start_it; it != stack_.end(); ++it) {
+            elements.emplace_back(std::move(*it));
+          }
+          stack_.erase(start_it, stack_.end());
+          stack_.emplace_back(c10::ivalue::Tuple::create(std::move(elements)));
+          break;
+        }
+      }
+    } break;
+    case PickleOpCode::TUPLE1: {
+      TORCH_CHECK(
+          !stack_.empty(),
+          "Parsing error: stack_ contains ",
+          stack_.size(),
+          " elements, at least 1 expected");
+      stack_.emplace_back(c10::ivalue::Tuple::create(pop(stack_)));
+    } break;
+    case PickleOpCode::TUPLE2: {
+      TORCH_CHECK(
+          stack_.size() > 1,
+          "Parsing error: stack_ contains ",
+          stack_.size(),
+          " elements, at least 2 expected");
+      auto e2 = pop(stack_);
+      auto e1 = pop(stack_);
+      stack_.emplace_back(
+          c10::ivalue::Tuple::create(std::move(e1), std::move(e2)));
+    } break;
+    case PickleOpCode::TUPLE3: {
+      TORCH_CHECK(
+          stack_.size() > 2,
+          "Parsing error: stack_ contains ",
+          stack_.size(),
+          " elements, at least 3 expected");
+      auto e3 = pop(stack_);
+      auto e2 = pop(stack_);
+      auto e1 = pop(stack_);
+      stack_.emplace_back(c10::ivalue::Tuple::create(
+          std::move(e1), std::move(e2), std::move(e3)));
+    } break;
+    case PickleOpCode::EMPTY_DICT:
+      stack_.emplace_back(
+          c10::impl::GenericDict(AnyType::get(), AnyType::get()));
+      break;
+    case PickleOpCode::APPENDS: {
+      TORCH_CHECK(!marks_.empty(), "Parsing error: marks_ is empty");
+      size_t start = marks_.back();
+      TORCH_CHECK(
+          start > 0 && start <= stack_.size(),
+          "Parsing error: wrong start index ",
+          start,
+          " for stack_ of size ",
+          stack_.size());
+      auto list_ivalue = stack_.at(start - 1);
+      readList(list_ivalue);
+    } break;
+    case PickleOpCode::APPEND: {
+      TORCH_CHECK(
+          stack_.size() >= 2, "Parsing error: missing elements in stack_.");
+      auto list_ivalue = stack_.at(stack_.size() - 2);
+      readListElements(list_ivalue, stack_.size() - 1);
+    } break;
+    case PickleOpCode::LIST: {
+      IValue list_ivalue = c10::impl::GenericList(AnyType::get());
+      readList(list_ivalue);
+      stack_.push_back(std::move(list_ivalue));
+    } break;
+    case PickleOpCode::DICT: {
+      TORCH_CHECK(!marks_.empty(), "Parsing error: marks_ is empty");
+      size_t start = marks_.back();
+      marks_.pop_back();
+      TORCH_CHECK(
+          stack_.size() > start,
+          "Parsing error: wrong start index ",
+          start,
+          " for stack_ which is of size ",
+          stack_.size());
+      auto dict = c10::impl::GenericDict(AnyType::get(), AnyType::get());
+      TORCH_CHECK(
+          (stack_.size() - start) % 2 == 0,
+          "Parsing error: stack_ is of size ",
+          stack_.size(),
+          " and start index is ",
+          start,
+          ", but stack_ is iterated by two elements at a time");
+      for (size_t i = start; i < stack_.size(); i += 2) {
+        dict.insert_or_assign(stack_[i], stack_[i + 1]);
+      }
+      stack_.erase(
+          stack_.begin() + static_cast(start), stack_.end());
+      stack_.emplace_back(std::move(dict));
+    } break;
+    case PickleOpCode::SETITEMS: {
+      TORCH_CHECK(!marks_.empty(), "Parsing error: marks_ is empty");
+      size_t start = marks_.back();
+      marks_.pop_back();
+      TORCH_CHECK(
+          start > 0 && start <= stack_.size(),
+          "Parsing error: wrong start index for stack_");
+      auto dict = stack_.at(start - 1).toGenericDict();
+      TORCH_CHECK(
+          (stack_.size() - start) % 2 == 0,
+          "Parsing error: stack_ is of size ",
+          stack_.size(),
+          " and start index is ",
+          start,
+          ", but stack_ is iterated by two elements at a time");
+      for (size_t i = start; i < stack_.size(); i += 2) {
+        dict.insert_or_assign(stack_[i], stack_[i + 1]);
+      }
+      stack_.erase(
+          stack_.begin() + static_cast(start), stack_.end());
+    } break;
+    case PickleOpCode::BINGET: {
+      auto pos = read();
+      TORCH_CHECK(
+          memo_table_.size() > pos,
+          "Parsing error: out of bounds access at ",
+          (size_t)pos,
+          " to memo_table_ which is of size ",
+          memo_table_.size());
+      stack_.push_back(memo_table_.at(pos));
+    } break;
+    case PickleOpCode::LONG_BINGET: {
+      auto pos = read();
+      TORCH_CHECK(
+          memo_table_.size() > pos,
+          "Parsing error: out of bounds access at ",
+          (size_t)pos,
+          " to memo_table_ which is of size ",
+          memo_table_.size());
+      stack_.push_back(memo_table_.at(pos));
+    } break;
+    case PickleOpCode::STOP:
+      break;
+    case PickleOpCode::GLOBAL: {
+      // Module name, it's not needed for anything
+      auto module_name = readString();
+      auto class_name = readString();
+      readGlobal(module_name, class_name);
+    } break;
+    case PickleOpCode::NEWOBJ: {
+      TORCH_CHECK(!stack_.empty(), "Parsing error: stack_ is empty");
+      // pop empty tuple, the actual action is stored in the globals_stack_
+      stack_.pop_back();
+    } break;
+    // because we have NEWOBJ do nothing, BUILD and REDUCE end up doing
+    // the same thing
+    case PickleOpCode::BUILD:
+    case PickleOpCode::REDUCE: {
+      // stack is:
+      // extract and remove from the stack:
+      TORCH_CHECK(
+          stack_.size() > 1,
+          "Parsing error: stack_ contains ",
+          stack_.size(),
+          " elements, at least 2 expected");
+
+      // In the OrderedDict case, the id has already been materialized
+      // and added to the stack, thus there's no but a Dict
+      // there, in this case we can just pop the functor args and break.
+      // The functor args in this case contain some other metadata like
+      // '{_metadata: {: {version: 1}}}' which seem to be safe to ignore. 
+ if (stack_.at(stack_.size() - 2).isGenericDict()) { + stack_.pop_back(); + break; + } + + std::swap(*(stack_.end() - 2), *(stack_.end() - 1)); + size_t idx = stack_.back().toInt(); + stack_.pop_back(); + // stack is: + TORCH_CHECK( + idx < globals_.size(), + "Parsing error: out of bounds access to globals_"); + globals_.at(idx)(); + } break; + case PickleOpCode::BINPERSID: { + TORCH_CHECK(!stack_.empty(), "Parsing error: stack_ is empty"); + auto tuple = pop(stack_).toTuple(); + const auto& args = tuple->elements(); + AT_ASSERT( + args.at(0).toStringRef() == "storage", + "unknown PERSID key ", + args.at(0).toStringRef()); + at::ScalarType type = args.at(1).toScalarType(); + const std::string& key = args.at(2).toStringRef(); + + at::Device device(args.at(3).toStringRef()); + // remap device location if it's not meta + if (device_ && !device.is_meta()) { + device = *device_; + } + + at::Storage storage; + if (storage_context_ != nullptr && storage_context_->hasStorage(key)) { + // for torch.package logic where storage may be loaded already + storage = storage_context_->getStorage(key); + } else { + int64_t numel = args.at(4).toInt(); + auto dtype = scalarTypeToTypeMeta(type); + + at::DataPtr storage_ptr; + if (numel > 0) { + // If there are no elements in the tensor, there's no point in + // reading a zero (0) byte file from the input stream and paying + // that cost. + storage_ptr = read_record_(key); + } + + storage = at::Storage( + c10::Storage::use_byte_size_t(), + numel * dtype.itemsize(), + std::move(storage_ptr), + /*allocator=*/nullptr, + /*resizable=*/false); // NB: we didn't set any allocator for the + // tensor + if (storage_context_ != nullptr) { + storage_context_->addStorage(key, storage); + } + } + + auto options = at::device(at::kCPU).dtype(type); + if (use_storage_device_) { + options = options.device(storage.device()); + device = storage.device(); + } + + at::Tensor tensor; + if (options.backend() == c10::Backend::QuantizedCPU) { + tensor = at::_empty_affine_quantized({}, options, 0, 0) + .set_(storage, 0, {}, {}); + } else { + tensor = at::empty({0}, options).set_(storage); + } + + if (device.is_cuda() || device.is_xpu() || device.is_meta() || + device.is_hpu() || device.is_mps() || device.is_privateuseone()) { + tensor = tensor.to(device, tensor.scalar_type()); + } else if (device.type() != DeviceType::CPU) { + TORCH_CHECK( + false, + "supported devices include CPU, CUDA, HPU and ", + c10::get_privateuse1_backend(), + " however got ", + DeviceTypeName(device.type(), false)); + } + stack_.emplace_back(std::move(tensor)); + } break; + case PickleOpCode::SETITEM: { + // At this OpCode, stack looks like + // | Stack Bottom | + // | ...... 
|
+      // | Dict         | -> (stack_size - 3)
+      // | Key          | -> (stack_size - 2)
+      // | Value        | -> (stack_size - 1)
+      TORCH_CHECK(
+          stack_.size() >= 3,
+          "Parsing error: stack doesn't have enough elements");
+
+      auto stack_size = stack_.size();
+      auto dict_pos = stack_size - 3;
+      auto key_pos = stack_size - 2;
+      auto val_pos = stack_size - 1;
+
+      TORCH_CHECK(
+          (dict_pos < stack_size) && (key_pos < stack_size) &&
+              (val_pos < stack_size),
+          "Parsing error: attempted out-of-bounds access while processing SETITEM opcode");
+
+      auto dict = stack_.at(dict_pos).toGenericDict();
+      dict.insert_or_assign(stack_.at(key_pos), stack_.at(val_pos));
+      stack_.erase(
+          stack_.begin() + static_cast(key_pos), stack_.end());
+    } break;
+    default: {
+      TORCH_CHECK(
+          false,
+          "Unknown opcode for unpickling at ",
+          // NOLINTNEXTLINE(performance-no-int-to-ptr)
+          reinterpret_cast(opcode),
+          ": ",
+          int(static_cast(opcode)));
+    } break;
+  }
+  return opcode;
+}
+
+void LanternUnpickler::run() {
+  // Expect a PROTO opcode and protocol number at the start of blob
+  auto opcode = readOpCode();
+  TORCH_CHECK(
+      opcode == PickleOpCode::PROTO,
+      "Expected PROTO opcode at the start"
+      " of pickle archive, found ",
+      int(static_cast(opcode)));
+  uint8_t protocol = read();
+  TORCH_CHECK(
+      protocol == 2,
+      "Only Pickle protocol 2 is supported, found protocol = ",
+      protocol);
+
+  while (true) {
+    PickleOpCode opcode = readInstruction();
+    if (opcode == PickleOpCode::STOP) {
+      return;
+    }
+  }
+}
+
+IValue LanternUnpickler::parse_ivalue() {
+  run();
+  TORCH_CHECK(
+      stack_.size() == 1,
+      "Unpickler expected 1 element on the stack, but found ",
+      stack_.size());
+  if (version_ <= 2) {
+    // See [type tag serialization]
+    restoreAccurateTypeTagsIfPossible(stack_[0]);
+  }
+  return stack_[0];
+}
+
+void LanternUnpickler::rebuildTensorFromTypeV2() {
+  // [NOTE] skip_next_read_global
+  // When rebuilding Tensor with Python Attr or Subclassed Tensor,
+  // we receive `(func, type(self), args, state)` on stack for
+  // `rebuildTensorFromTypeV2`.
+  // Thus next call to readGlobal corresponds to `func` which is
+  // the function to rebuild the base tensor.
+  // The call after `func` to readGlobal corresponds to `type` of the
+  // Tensor where we raise warning if the type is not `torch.Tensor`.
+  this->skip_next_read_global = 2;
+  auto curr_globals_idx = globals_.size();
+  globals_.emplace_back([this, curr_globals_idx] {
+    // args is a tuple with following data
+    // (function to rebuild base tensor, type of tensor,
+    // arguments to construct base tensor, Python State (as dict))
+    auto args = pop(stack_).toTuple();
+    size_t tup_idx = 0;
+    const auto args_elems = args->elements();
+    auto base_tensor_args = args_elems.at(tup_idx + 2).toTuple();
+    auto py_state = args_elems.at(tup_idx + 3).toGenericDict();
+    if (!py_state.empty()) {
+      TORCH_WARN(
+          "Loading Tensor with Python attributes will return at::Tensor with Python attributes being discarded");
+    }
+    // This calls the function to rebuild the
+    // base tensor.
+    // Eg. `rebuildTensor`, `rebuildSparseTensor`. 
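    // (Illustrative layout, not part of the patch: for a tensor saved by
    //  torch.save with Python attributes, `args` above is the REDUCE
    //  argument tuple of torch._tensor._rebuild_from_type_v2, i.e.
    //    (torch._utils._rebuild_tensor_v2,  -> its closure was registered
    //        right after this one, at globals_[curr_globals_idx + 1]
    //     torch.Tensor,                     -> checked in readGlobal above
    //     (storage, offset, size, stride, requires_grad, hooks),
    //     python_state_dict)                -> discarded with the warning
    //  so re-pushing `base_tensor_args` and invoking that closure rebuilds
    //  a plain at::Tensor.)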
+ stack_.emplace_back(base_tensor_args); + globals_[curr_globals_idx + 1](); + stack_.emplace_back(pop(stack_)); }); } @@ -1187,7 +1215,7 @@ void LanternUnpickler::readSlowWithBuffer(char* dest, size_t sz) { AT_ASSERT(sz <= buffer_.size()); buffer_remaining_ = reader_(buffer_.data(), buffer_.size()); if (buffer_remaining_ < needed) { - AT_ERROR("Unexpected end of pickler archive."); + TORCH_CHECK(false, "Unexpected end of pickler archive."); } memcpy(dest + from_old_buf, buffer_.data(), needed); buffer_pos_ = needed; // assignment (0'ed from read) @@ -1198,6 +1226,11 @@ void LanternUnpickler::readSlowWithBuffer(char* dest, size_t sz) { std::string LanternUnpickler::readBytes(size_t length) { std::string data; static const size_t kSmallString = 64; + TORCH_CHECK( + length <= data.max_size(), + "Parsing error: can't read ", + length, + " bytes to a string"); if (length <= buffer_remaining_) { // Fast-path: entirely in buffer. data.assign(buffer_.data() + buffer_pos_, length); @@ -1220,7 +1253,7 @@ std::string LanternUnpickler::readBytes(size_t length) { const size_t needed = length - from_old_buf; size_t nread = reader_(&data[from_old_buf], needed); if (nread != needed) { - AT_ERROR("Unexpected end of pickler archive."); + TORCH_CHECK(false, "Unexpected end of pickler archive."); } buffer_remaining_ = 0; // buffer_pos_ has no meaning with buffer_remaining_ == 0. @@ -1228,12 +1261,7 @@ std::string LanternUnpickler::readBytes(size_t length) { return data; } -// Pop all the list items off of the stack and append them to the list at -// the corresponding MARK -void LanternUnpickler::readList(IValue list_ivalue) { - TORCH_CHECK(!marks_.empty(), "Parsing error: marks_ is empty"); - size_t start = marks_.back(); - marks_.pop_back(); +void LanternUnpickler::readListElements(IValue list_ivalue, size_t start) { auto num_elements = stack_.size() - start; auto elements = c10::ArrayRef(stack_).slice(start); if (list_ivalue.isIntList()) { @@ -1267,236 +1295,169 @@ void LanternUnpickler::readList(IValue list_ivalue) { list.emplace_back(elem); } } else { - AT_ERROR("Unknown IValue list kind: ", list_ivalue.tagKind()); - } - - stack_.erase(stack_.begin() + start, stack_.end()); -} - -inline bool is_valid_python_id_char(char c) { - return c == '_' || c == '.' || (c >= '0' && c <= '9') || - (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); -} - -// Read a newline terminated string -std::string LanternUnpickler::readString() { - std::string ss; - while (true) { - auto* const bufferStart = buffer_.data() + buffer_pos_; - const auto bufferLeft = buffer_.size() - buffer_pos_; - char* const newlinePtr = - static_cast(memchr(bufferStart, '\n', bufferLeft)); - if (newlinePtr) { - // read up to newline and we are done. 
- auto const charsRead = newlinePtr - bufferStart; - ss.append(bufferStart, charsRead); - buffer_remaining_ -= charsRead + 1; - buffer_pos_ += charsRead + 1; - break; - } else { - // read whole buffer, refill - for (const char* p = bufferStart; p < bufferStart + bufferLeft; ++p) { - // Simple check just in case there is no terminating '\n' - TORCH_CHECK( - is_valid_python_id_char(*p), - "Found character '", - int(uint8_t(*p)), - "' in string, ", - "strings must be qualified Python identifiers"); - } - ss.append(bufferStart, bufferLeft); - buffer_remaining_ = reader_(buffer_.data(), buffer_.size()); - buffer_pos_ = 0; - } + TORCH_CHECK(false, "Unknown IValue list kind: ", list_ivalue.tagKind()); } - return ss; + stack_.erase( + stack_.begin() + static_cast(start), stack_.end()); } -} // namespace jit -} // namespace torch - -#include -#include - -namespace torch { -namespace jit { - -IValue readArchiveAndTensors( - const std::string& archive_name, - const std::string& pickle_prefix, - const std::string& tensor_prefix, - c10::optional type_resolver, - c10::optional obj_loader, - c10::optional device, - caffe2::serialize::PyTorchStreamReader& stream_reader, - c10::TypePtr (*type_parser)(const std::string&), - std::shared_ptr storage_context) { - std::string picklename = pickle_prefix + archive_name + ".pkl"; - at::DataPtr pickle_ptr; - size_t pickle_size = 0; - std::tie(pickle_ptr, pickle_size) = stream_reader.getRecord(picklename); - - size_t bytes_read = 0; - auto data = reinterpret_cast(pickle_ptr.get()); - auto reader = [&](char* buffer, size_t len) -> size_t { - if (bytes_read >= pickle_size) { - return 0; - } - len = std::min(pickle_size - bytes_read, len); - // Copy len bytes into buffer - const char* start = data + bytes_read; - std::memcpy(buffer, start, len); - bytes_read += len; - return len; - }; - - std::string tensor_dir_path = - (tensor_prefix.compare("") != 0) ? tensor_prefix : archive_name + "/"; - - auto read_record = [&](const std::string& name) { - std::string ss = tensor_dir_path + name; - return std::get<0>(stream_reader.getRecord(ss)); - }; - - LanternUnpickler unpickler( - reader, - type_resolver ? std::move(*type_resolver) : nullptr, - obj_loader ? 
-      std::move(read_record),
-      device,
-      false,
-      type_parser,
-      storage_context);
-  unpickler.set_version(stream_reader.version());
-  return unpickler.parse_ivalue();
-}
-}
+// Pop all the list items off of the stack and append them to the list at
+// the corresponding MARK
+void LanternUnpickler::readList(IValue list_ivalue) {
+  TORCH_CHECK(!marks_.empty(), "Parsing error: marks_ is empty");
+  size_t start = marks_.back();
+  marks_.pop_back();
+  readListElements(std::move(list_ivalue), start);
 }
-#include
-
-#include
-#include
-#include
-#include
-#include
 
+double LanternUnpickler::readFloat() {
+  AT_ASSERT(sizeof(double) == 8);
+  double big_endian = read<double>();
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  double little_endian = 0;
-namespace torch {
-namespace jit {
+  // Pickle floats are big endian, so reverse the bytes
+  auto big_endian_ptr = reinterpret_cast<const char*>(&big_endian);
+  std::reverse_copy(
+      big_endian_ptr,
+      big_endian_ptr + sizeof(big_endian),
+      reinterpret_cast<char*>(&little_endian));
-void pickle(
-    std::function<void(const char*, size_t)> writer,
-    const IValue& ivalue,
-    std::vector<at::Tensor>* tensor_table) {
-  Pickler pickler(std::move(writer), tensor_table, nullptr, nullptr);
-  pickler.protocol();
-  pickler.pushIValue(ivalue);
-  pickler.stop();
+  return little_endian;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  return big_endian;
+#else
+#error Unexpected or undefined __BYTE_ORDER__
+#endif
 }
 
-std::vector<char> pickle(
-    const IValue& ivalue,
-    std::vector<at::Tensor>* tensor_table) {
-  std::vector<char> data;
-
-  pickle(
-      [&](const char* bytes, size_t len) {
-        data.insert(data.end(), bytes, bytes + len);
-      },
-      ivalue,
-      tensor_table);
-
-  return data;
+static std::vector<int64_t> tupleToIntList(const IValue& v) {
+  return fmap(v.toTupleRef().elements(), [](const IValue& v) -> int64_t {
+    return v.toInt();
+  });
 }
 
-#ifndef C10_MOBILE
-class VectorReader : public caffe2::serialize::ReadAdapterInterface {
- public:
-  VectorReader(std::vector<char> data) : data_(std::move(data)) {}
-
-  size_t size() const override {
-    return data_.size();
-  }
-
-  size_t read(uint64_t pos, void* buf, size_t n, const char* what)
-      const override {
-    std::copy(
-        data_.data() + pos,
-        data_.data() + pos + n,
-        reinterpret_cast<char*>(buf));
-    return n;
-  }
-
- private:
-  std::vector<char> data_;
-};
-#endif
+void LanternUnpickler::rebuildTensor(bool quantized) {
+  globals_.emplace_back([this, quantized] {
+    auto tup = pop(stack_).toTuple();
+    const auto& elements = tup->elements();
+    size_t idx = 0;
+    auto& storage_tensor = elements.at(idx++).toTensor();
+    int64_t storage_offset = elements.at(idx++).toInt();
+    std::vector<int64_t> size = tupleToIntList(elements.at(idx++));
+    std::vector<int64_t> stride = tupleToIntList(elements.at(idx++));
+    at::Tensor result;
+    if (quantized) {
+      auto qparams_tuple = elements.at(idx++).toTuple();
+      const auto& qparams = qparams_tuple->elements();
+      auto qscheme = static_cast<at::QScheme>(qparams.at(0).toInt());
+      switch (qscheme) {
+        case at::kPerTensorAffine: {
+          double q_scale = qparams.at(1).toDouble();
+          int64_t q_zero_point = qparams.at(2).toInt();
+          result = at::_empty_affine_quantized(
+              {0}, storage_tensor.options(), q_scale, q_zero_point);
+        } break;
+        case at::kPerChannelAffineFloatQParams:
+        case at::kPerChannelAffine: {
+          const auto& scales = qparams.at(1).toTensor();
+          const auto& zero_points = qparams.at(2).toTensor();
+          int64_t axis = qparams.at(3).toInt();
+          result = at::_empty_per_channel_affine_quantized(
+              {0}, scales, zero_points, axis, storage_tensor.options());
+        } break;
+        default:
+          TORCH_CHECK(
+              false,
+              "Unsupported tensor quantization type in serialization ",
+              toString(qscheme));
+          break;
+      }
+    } else {
+      result = at::empty({0}, storage_tensor.options());
+    }
+    bool requires_grad = elements.at(idx++).toBool();
+    idx++; // backwards hooks is empty
+    at::TensorImpl* impl = result.unsafeGetTensorImpl();
+    impl->set_storage_keep_dtype(storage_tensor.storage());
+    impl->set_storage_offset(storage_offset);
+    impl->set_sizes_and_strides(size, stride);
+    result = autograd::make_variable(result, requires_grad);
 
-IValue pickle_load2(const std::vector<char>& data) {
-  // Read in the pickle data
-#ifndef C10_MOBILE
-  caffe2::serialize::PyTorchStreamReader reader(
-      std::make_unique<VectorReader>(data));
-
-  return readArchiveAndTensors(
-      "data",
-      /*pickle_prefix=*/"",
-      /*tensor_prefix=*/"",
-      /*type_resolver=*/c10::nullopt,
-      /*obj_loader=*/c10::nullopt,
-      /*device=*/c10::nullopt,
-      reader);
-#else
-  AT_ERROR(
-      "pickle_load not supported on mobile "
-      "(see https://github.com/pytorch/pytorch/pull/30108)");
-#endif
-}
+    // Handle if math_bits were pickled.
+    // See `args` of _reduce_ex_internal
+    // for a regular tensor (final else case).
+    // Tensors pickled before this patch didn't
+    // have this argument for storing MathBits,
+    // in that case, we do nothing.
+    // NOTE: `math_bits` is the 7th arg.
+    // NOTE: This is only meant for regular tensor and not quantized
+    // which also has 7 args serialized.
+    if (!quantized && elements.size() == 7) {
+      auto math_bits = elements.at(idx++).toGenericDict();
+      torch::jit::setTensorMetadata(result, math_bits);
+    }
 
-IValue pickle_load3(std::string path) {
-  caffe2::serialize::PyTorchStreamReader reader(path);
-  return readArchiveAndTensors(
-      "data",
-      /*pickle_prefix=*/"",
-      /*tensor_prefix=*/"",
-      /*type_resolver=*/c10::nullopt,
-      /*obj_loader=*/c10::nullopt,
-      /*device=*/c10::nullopt,
-      reader);
+    stack_.emplace_back(std::move(result));
+  });
 }
 
-IValue unpickle(
-    std::function<size_t(char*, size_t)> reader,
-    TypeResolver type_resolver,
-    c10::ArrayRef<at::Tensor> tensor_table,
-    c10::TypePtr (*type_parser)(const std::string&)) {
-  LanternUnpickler unpickler(
-      std::move(reader), std::move(type_resolver), tensor_table, type_parser);
-  return unpickler.parse_ivalue();
+void LanternUnpickler::rebuildSparseTensor() {
+  globals_.emplace_back([this] {
+    auto tup = pop(stack_).toTuple();
+    const auto& elements = tup->elements();
+    size_t idx = 0;
+    auto layout = elements.at(idx++).toInt();
+    at::Tensor result;
+    switch (layout) {
+      case static_cast<int64_t>(c10::Layout::Sparse): {
+        std::vector<int64_t> size = tupleToIntList(elements.at(idx++));
+        bool requires_grad = elements.at(idx++).toBool();
+        auto& indices_tensor = elements.at(idx++).toTensor();
+        auto& values_tensor = elements.at(idx++).toTensor();
+        auto options = values_tensor.options()
+                           .layout(c10::Layout::Sparse)
+                           .requires_grad(requires_grad);
+        result = at::_sparse_coo_tensor_unsafe(
+            indices_tensor, values_tensor, size, options);
+        result = autograd::make_variable(result, options.requires_grad());
+        break;
+      }
+      case static_cast<int64_t>(c10::Layout::SparseCsr): {
+        std::vector<int64_t> size = tupleToIntList(elements.at(idx++));
+        bool requires_grad = elements.at(idx++).toBool();
+        auto& crow_indices = elements.at(idx++).toTensor();
+        auto& col_indices = elements.at(idx++).toTensor();
+        auto& values_tensor = elements.at(idx++).toTensor();
+        auto options = values_tensor.options()
+                           .layout(c10::Layout::SparseCsr)
+                           .requires_grad(requires_grad);
+        result = at::_sparse_csr_tensor_unsafe(
+            crow_indices, col_indices, values_tensor, size, options);
+        result =
+            autograd::make_variable(std::move(result), options.requires_grad());
+        break;
+      }
+      default:
+        TORCH_CHECK(
+            false,
+            "Unsupported sparse tensor layout type in serialization ",
+            static_cast<int64_t>(layout));
+        break;
+    }
+    stack_.emplace_back(std::move(result));
+  });
 }
 
-IValue unpickle(
-    const char* data,
-    size_t size,
-    TypeResolver type_resolver,
-    c10::ArrayRef<at::Tensor> tensor_table,
-    c10::TypePtr (*type_parser)(const std::string&)) {
-  size_t bytes_read = 0;
-  return unpickle(
-      [&](char* buffer, size_t len) -> size_t {
-        if (bytes_read >= size) {
-          return 0;
-        }
-        len = std::min(size - bytes_read, len);
-        // Copy len bytes into buffer
-        const char* start = data + bytes_read;
-        std::memcpy(buffer, start, len);
-        bytes_read += len;
-        return len;
-      },
-      std::move(type_resolver),
-      tensor_table,
-      type_parser);
+void LanternUnpickler::setInput(size_t memo_id) {
+  AT_ASSERT(!stack_.empty());
+  if (memo_id >= memo_table_.size()) {
+    memo_table_.insert(
+        memo_table_.end(), memo_id - memo_table_.size(), IValue());
+    memo_table_.push_back(stack_.back());
+  } else {
+    memo_table_[memo_id] = stack_.back();
+  }
 }
 
 } // namespace jit
diff --git a/src/lantern/src/Unpickler.h b/src/lantern/src/Unpickler.h
new file mode 100644
index 0000000000..bd764d5506
--- /dev/null
+++ b/src/lantern/src/Unpickler.h
@@ -0,0 +1,19 @@
+#include
+
+#ifdef _WIN32
+#define API __declspec(dllexport)
+#else
+#define API
+#endif
+
+namespace torch {
+namespace jit {
+
+API IValue lantern_read_pickle(
+    const std::string& archive_name,
+    caffe2::serialize::PyTorchStreamReader& stream_reader);
+
+} // namespace jit
+} // namespace torch
+
+#undef API
\ No newline at end of file
diff --git a/src/lantern/src/lantern.cpp b/src/lantern/src/lantern.cpp
index 72edf9ce88..7db0f3cf2e 100644
--- a/src/lantern/src/lantern.cpp
+++ b/src/lantern/src/lantern.cpp
@@ -1,11 +1,8 @@
+#include
 #include
 #define LANTERN_BUILD
-
 #include "lantern/lantern.h"
-
-#include
-
 #include "utils.hpp"
 
 int lanternLogEnabled = 0;
@@ -287,14 +284,94 @@ void* _lantern__assert_async_tensor(void* self)
   LANTERN_FUNCTION_END
 }
 
+void* _lantern__assert_async_tensor_cstringview(void* self, void* assert_msg)
+{
+  LANTERN_FUNCTION_START
+  torch::_assert_async(from_raw::Tensor(self), from_raw::string_view(assert_msg));
+  return NULL;
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__assert_scalar_scalar_cstringview(void* self, void* assert_msg)
+{
+  LANTERN_FUNCTION_START
+  torch::_assert_scalar(from_raw::Scalar(self), from_raw::string_view(assert_msg));
+  return NULL;
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__functional_assert_scalar_scalar_cstringview_tensor(void* self, void* assert_msg, void* dep_token)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::Tensor(torch::_functional_assert_scalar(
+      from_raw::Scalar(self), from_raw::string_view(assert_msg), from_raw::Tensor(dep_token)));
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__functional_assert_async_tensor_cstringview_tensor(void* self, void* assert_msg, void* dep_token)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::Tensor(torch::_functional_assert_async(
+      from_raw::Tensor(self), from_raw::string_view(assert_msg), from_raw::Tensor(dep_token)));
+  LANTERN_FUNCTION_END
+}
+
 void* _lantern__assert_tensor_metadata_tensor_intarrayref_intarrayref_scalartype(void* a, void* size, void* stride, void* dtype)
 {
   LANTERN_FUNCTION_START
-  torch::_assert_tensor_metadata(from_raw::Tensor(a), from_raw::IntArrayRef(size), from_raw::IntArrayRef(stride), from_raw::optional::ScalarType(dtype));
+  torch::_assert_tensor_metadata(from_raw::Tensor(a),
from_raw::optional::IntArrayRef(size), from_raw::optional::IntArrayRef(stride), from_raw::optional::ScalarType(dtype)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__print_cstringview(void* s) +{ + LANTERN_FUNCTION_START + torch::_print(from_raw::string_view(s)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern_sym_constrain_range_scalar_intt_intt(void* size, void* min, void* max) +{ + LANTERN_FUNCTION_START + torch::sym_constrain_range(from_raw::Scalar(size), from_raw::optional::int64_t(min), from_raw::optional::int64_t(max)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern_sym_constrain_range_for_size_scalar_intt_intt(void* size, void* min, void* max) +{ + LANTERN_FUNCTION_START + torch::sym_constrain_range_for_size(from_raw::Scalar(size), from_raw::optional::int64_t(min), from_raw::optional::int64_t(max)); return NULL; LANTERN_FUNCTION_END } +void* _lantern__functional_sym_constrain_range_scalar_intt_intt_tensor(void* size, void* min, void* max, void* dep_token) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_functional_sym_constrain_range( + from_raw::Scalar(size), from_raw::optional::int64_t(min), from_raw::optional::int64_t(max), from_raw::Tensor(dep_token))); + LANTERN_FUNCTION_END +} + +void* _lantern__functional_sym_constrain_range_for_size_scalar_intt_intt_tensor(void* size, void* min, void* max, void* dep_token) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_functional_sym_constrain_range_for_size( + from_raw::Scalar(size), from_raw::optional::int64_t(min), from_raw::optional::int64_t(max), from_raw::Tensor(dep_token))); + LANTERN_FUNCTION_END +} + +void* _lantern__make_dep_token_tensoroptions_memoryformat(void* options, void* memory_format) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_make_dep_token( + from_raw::TensorOptions(options), from_raw::optional::MemoryFormat(memory_format))); + LANTERN_FUNCTION_END +} + void* _lantern_Tensor_refine_names_tensor_dimnamelist(void* self, void* names) { LANTERN_FUNCTION_START @@ -1159,6 +1236,14 @@ void* _lantern__test_check_tensor_tensor(void* self) LANTERN_FUNCTION_END } +void* _lantern__test_functorch_fallback_tensor_tensor(void* self, void* other) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_test_functorch_fallback( + from_raw::Tensor(self), from_raw::Tensor(other))); + LANTERN_FUNCTION_END +} + void* _lantern_all_tensor_intt_bool(void* self, void* dim, void* keepdim) { LANTERN_FUNCTION_START @@ -1175,6 +1260,22 @@ void* _lantern_Tensor_all_tensor_intt_bool(void* self, void* dim, void* keepdim) LANTERN_FUNCTION_END } +void* _lantern_all_tensor_intarrayref_bool(void* self, void* dim, void* keepdim) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::all( + from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim))); + LANTERN_FUNCTION_END +} + +void* _lantern_Tensor_all_tensor_intarrayref_bool(void* self, void* dim, void* keepdim) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(from_raw::Tensor(self).all( + from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim))); + LANTERN_FUNCTION_END +} + void* _lantern_all_out_tensor_tensor_intt_bool(void* out, void* self, void* dim, void* keepdim) { LANTERN_FUNCTION_START @@ -1183,6 +1284,14 @@ void* _lantern_all_out_tensor_tensor_intt_bool(void* out, void* self, void* dim, LANTERN_FUNCTION_END } +void* _lantern_all_out_tensor_tensor_intarrayref_bool(void* out, void* self, void* dim, void* keepdim) +{ + LANTERN_FUNCTION_START + return 
make_raw::Tensor(torch::all_out( + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim))); + LANTERN_FUNCTION_END +} + void* _lantern_all_tensor_dimname_bool(void* self, void* dim, void* keepdim) { LANTERN_FUNCTION_START @@ -1239,6 +1348,22 @@ void* _lantern_Tensor_any_tensor_intt_bool(void* self, void* dim, void* keepdim) LANTERN_FUNCTION_END } +void* _lantern_any_tensor_intarrayref_bool(void* self, void* dim, void* keepdim) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::any( + from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim))); + LANTERN_FUNCTION_END +} + +void* _lantern_Tensor_any_tensor_intarrayref_bool(void* self, void* dim, void* keepdim) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(from_raw::Tensor(self).any( + from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim))); + LANTERN_FUNCTION_END +} + void* _lantern_any_out_tensor_tensor_intt_bool(void* out, void* self, void* dim, void* keepdim) { LANTERN_FUNCTION_START @@ -1247,6 +1372,14 @@ void* _lantern_any_out_tensor_tensor_intt_bool(void* out, void* self, void* dim, LANTERN_FUNCTION_END } +void* _lantern_any_out_tensor_tensor_intarrayref_bool(void* out, void* self, void* dim, void* keepdim) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::any_out( + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim))); + LANTERN_FUNCTION_END +} + void* _lantern_any_tensor_dimname_bool(void* self, void* dim, void* keepdim) { LANTERN_FUNCTION_START @@ -2143,6 +2276,22 @@ void* _lantern_copysign_out_tensor_tensor_scalar(void* out, void* self, void* ot LANTERN_FUNCTION_END } +void* _lantern__lazy_clone_tensor(void* self) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_lazy_clone( + from_raw::Tensor(self))); + LANTERN_FUNCTION_END +} + +void* _lantern_Tensor__lazy_clone_tensor(void* self) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(from_raw::Tensor(self)._lazy_clone( + )); + LANTERN_FUNCTION_END +} + void* _lantern_logical_not_tensor(void* self) { LANTERN_FUNCTION_START @@ -2971,7 +3120,7 @@ void* _lantern_convolution_backward_tensor_tensor_tensor_intarrayref_intarrayref { LANTERN_FUNCTION_START return make_raw::tuple(torch::convolution_backward( - from_raw::Tensor(grad_output), from_raw::Tensor(input), from_raw::Tensor(weight), from_raw::IntArrayRef(bias_sizes), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(dilation), from_raw::bool_t(transposed), from_raw::IntArrayRef(output_padding), from_raw::int64_t(groups), from_raw::vector::bool_t(output_mask))); + from_raw::Tensor(grad_output), from_raw::Tensor(input), from_raw::Tensor(weight), from_raw::optional::IntArrayRef(bias_sizes), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(dilation), from_raw::bool_t(transposed), from_raw::IntArrayRef(output_padding), from_raw::int64_t(groups), from_raw::vector::bool_t(output_mask))); LANTERN_FUNCTION_END } @@ -3335,6 +3484,14 @@ void* _lantern_cudnn_convolution_tensor_tensor_intarrayref_intarrayref_intarrayr LANTERN_FUNCTION_END } +void* _lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(void* out, void* self, void* weight, void* padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32) +{ + LANTERN_FUNCTION_START + return 
make_raw::Tensor(torch::cudnn_convolution_out( + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Tensor(weight), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(dilation), from_raw::int64_t(groups), from_raw::bool_t(benchmark), from_raw::bool_t(deterministic), from_raw::bool_t(allow_tf32))); + LANTERN_FUNCTION_END +} + void* _lantern_cudnn_convolution_transpose_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(void* self, void* weight, void* padding, void* output_padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32) { LANTERN_FUNCTION_START @@ -4227,7 +4384,7 @@ void* _lantern_einsum_cstringview_tensorlist_intarrayref(void* equation, void* t { LANTERN_FUNCTION_START return make_raw::Tensor(torch::einsum( - from_raw::string_view(equation), from_raw::TensorList(tensors), from_raw::IntArrayRef(path))); + from_raw::string_view(equation), from_raw::TensorList(tensors), from_raw::optional::IntArrayRef(path))); LANTERN_FUNCTION_END } @@ -4375,6 +4532,14 @@ void* _lantern_empty_intarrayref_tensoroptions_memoryformat(void* size, void* op LANTERN_FUNCTION_END } +void* _lantern_empty_permuted_intarrayref_intarrayref_tensoroptions(void* size, void* physical_layout, void* options) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::empty_permuted( + from_raw::IntArrayRef(size), from_raw::IntArrayRef(physical_layout), from_raw::TensorOptions(options))); + LANTERN_FUNCTION_END +} + void* _lantern_Tensor_new_empty_tensor_intarrayref_tensoroptions(void* self, void* size, void* options) { LANTERN_FUNCTION_START @@ -5343,39 +5508,39 @@ void* _lantern__validate_compressed_sparse_indices_bool_tensor_tensor_intt_intt_ LANTERN_FUNCTION_END } -void* _lantern__cufft_get_plan_cache_size_intt(void* device_index) +void* _lantern__cufft_get_plan_cache_size_deviceindex(void* device_index) { LANTERN_FUNCTION_START return make_raw::int64_t(torch::_cufft_get_plan_cache_size( - from_raw::int64_t(device_index))); + from_raw::DeviceIndex(device_index))); LANTERN_FUNCTION_END } -void* _lantern__cufft_get_plan_cache_max_size_intt(void* device_index) +void* _lantern__cufft_get_plan_cache_max_size_deviceindex(void* device_index) { LANTERN_FUNCTION_START return make_raw::int64_t(torch::_cufft_get_plan_cache_max_size( - from_raw::int64_t(device_index))); + from_raw::DeviceIndex(device_index))); LANTERN_FUNCTION_END } -void* _lantern__cufft_set_plan_cache_max_size_intt_intt(void* device_index, void* max_size) +void* _lantern__cufft_set_plan_cache_max_size_deviceindex_intt(void* device_index, void* max_size) { LANTERN_FUNCTION_START - torch::_cufft_set_plan_cache_max_size(from_raw::int64_t(device_index), from_raw::int64_t(max_size)); + torch::_cufft_set_plan_cache_max_size(from_raw::DeviceIndex(device_index), from_raw::int64_t(max_size)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__cufft_clear_plan_cache_intt(void* device_index) +void* _lantern__cufft_clear_plan_cache_deviceindex(void* device_index) { LANTERN_FUNCTION_START - torch::_cufft_clear_plan_cache(from_raw::int64_t(device_index)); + torch::_cufft_clear_plan_cache(from_raw::DeviceIndex(device_index)); return NULL; LANTERN_FUNCTION_END } -void* _lantern_index_tensor_constclistcoptionaltensor(void* self, void* indices) +void* _lantern_index_tensor_constcliststdoptionaltensor(void* self, void* indices) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::index( @@ -5383,7 +5548,7 @@ void* 
_lantern_index_tensor_constclistcoptionaltensor(void* self, void* indices) LANTERN_FUNCTION_END } -void* _lantern_Tensor_index_tensor_constclistcoptionaltensor(void* self, void* indices) +void* _lantern_Tensor_index_tensor_constcliststdoptionaltensor(void* self, void* indices) { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).index( @@ -5391,7 +5556,7 @@ void* _lantern_Tensor_index_tensor_constclistcoptionaltensor(void* self, void* i LANTERN_FUNCTION_END } -void* _lantern_index_out_tensor_tensor_constclistcoptionaltensor(void* out, void* self, void* indices) +void* _lantern_index_out_tensor_tensor_constcliststdoptionaltensor(void* out, void* self, void* indices) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::index_out( @@ -5399,6 +5564,30 @@ void* _lantern_index_out_tensor_tensor_constclistcoptionaltensor(void* out, void LANTERN_FUNCTION_END } +void* _lantern__unsafe_index_tensor_constcliststdoptionaltensor(void* self, void* indices) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_unsafe_index( + from_raw::Tensor(self), from_raw::optional::TensorList(indices))); + LANTERN_FUNCTION_END +} + +void* _lantern__unsafe_masked_index_tensor_tensor_constcliststdoptionaltensor_scalar(void* self, void* mask, void* indices, void* fill) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_unsafe_masked_index( + from_raw::Tensor(self), from_raw::Tensor(mask), from_raw::optional::TensorList(indices), from_raw::Scalar(fill))); + LANTERN_FUNCTION_END +} + +void* _lantern__unsafe_masked_index_put_accumulate_tensor_tensor_constcliststdoptionaltensor_tensor(void* self, void* mask, void* indices, void* values) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_unsafe_masked_index_put_accumulate( + from_raw::Tensor(self), from_raw::Tensor(mask), from_raw::optional::TensorList(indices), from_raw::Tensor(values))); + LANTERN_FUNCTION_END +} + void* _lantern_index_copy_out_tensor_tensor_intt_tensor_tensor(void* out, void* self, void* dim, void* index, void* source) { LANTERN_FUNCTION_START @@ -5455,7 +5644,7 @@ void* _lantern_Tensor_index_copy_tensor_dimname_tensor_tensor(void* self, void* LANTERN_FUNCTION_END } -void* _lantern_index_put__tensor_constclistcoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) +void* _lantern_index_put__tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::index_put_( @@ -5463,7 +5652,7 @@ void* _lantern_index_put__tensor_constclistcoptionaltensor_tensor_bool(void* sel LANTERN_FUNCTION_END } -void* _lantern_Tensor_index_put__tensor_constclistcoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) +void* _lantern_Tensor_index_put__tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).index_put_( @@ -5471,7 +5660,7 @@ void* _lantern_Tensor_index_put__tensor_constclistcoptionaltensor_tensor_bool(vo LANTERN_FUNCTION_END } -void* _lantern_index_put_tensor_constclistcoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) +void* _lantern_index_put_tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::index_put( @@ -5479,7 +5668,7 @@ void* 
_lantern_index_put_tensor_constclistcoptionaltensor_tensor_bool(void* self LANTERN_FUNCTION_END } -void* _lantern_Tensor_index_put_tensor_constclistcoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) +void* _lantern_Tensor_index_put_tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).index_put( @@ -5487,7 +5676,15 @@ void* _lantern_Tensor_index_put_tensor_constclistcoptionaltensor_tensor_bool(voi LANTERN_FUNCTION_END } -void* _lantern__index_put_impl__tensor_constclistcoptionaltensor_tensor_bool_bool(void* self, void* indices, void* values, void* accumulate, void* unsafe) +void* _lantern__unsafe_index_put_tensor_constcliststdoptionaltensor_tensor_bool(void* self, void* indices, void* values, void* accumulate) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_unsafe_index_put( + from_raw::Tensor(self), from_raw::optional::TensorList(indices), from_raw::Tensor(values), from_raw::bool_t(accumulate))); + LANTERN_FUNCTION_END +} + +void* _lantern__index_put_impl__tensor_constcliststdoptionaltensor_tensor_bool_bool(void* self, void* indices, void* values, void* accumulate, void* unsafe) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::_index_put_impl_( @@ -5863,6 +6060,14 @@ void* _lantern_native_layer_norm_backward_tensor_tensor_intarrayref_tensor_tenso LANTERN_FUNCTION_END } +void* _lantern_rms_norm_tensor_intarrayref_tensor_double(void* input, void* normalized_shape, void* weight, void* eps) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::rms_norm( + from_raw::Tensor(input), from_raw::IntArrayRef(normalized_shape), from_raw::optional::Tensor(weight), from_raw::optional::double_t(eps))); + LANTERN_FUNCTION_END +} + void* _lantern_nan_to_num_tensor_double_double_double(void* self, void* nan, void* posinf, void* neginf) { LANTERN_FUNCTION_START @@ -5959,6 +6164,86 @@ void* _lantern_mkldnn_linear_backward_tensor_tensor_tensor_stdarraybool(void* se LANTERN_FUNCTION_END } +void* _lantern__cslt_compress_tensor(void* input) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_cslt_compress( + from_raw::Tensor(input))); + LANTERN_FUNCTION_END +} + +void* _lantern__cslt_sparse_mm_tensor_tensor_tensor_tensor_scalartype_bool_intt(void* compressed_A, void* dense_B, void* bias, void* alpha, void* out_dtype, void* transpose_result, void* alg_id) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_cslt_sparse_mm( + from_raw::Tensor(compressed_A), from_raw::Tensor(dense_B), from_raw::optional::Tensor(bias), from_raw::optional::Tensor(alpha), from_raw::optional::ScalarType(out_dtype), from_raw::bool_t(transpose_result), from_raw::int64_t(alg_id))); + LANTERN_FUNCTION_END +} + +void* _lantern__cslt_sparse_mm_search_tensor_tensor_tensor_tensor_scalartype_bool(void* compressed_A, void* dense_B, void* bias, void* alpha, void* out_dtype, void* transpose_result) +{ + LANTERN_FUNCTION_START + return make_raw::int64_t(torch::_cslt_sparse_mm_search( + from_raw::Tensor(compressed_A), from_raw::Tensor(dense_B), from_raw::optional::Tensor(bias), from_raw::optional::Tensor(alpha), from_raw::optional::ScalarType(out_dtype), from_raw::bool_t(transpose_result))); + LANTERN_FUNCTION_END +} + +void* _lantern__sparse_semi_structured_tile_tensor_cstringview_bool(void* input, void* algorithm, void* use_cutlass) +{ + LANTERN_FUNCTION_START + return make_raw::tuple(torch::_sparse_semi_structured_tile( + 
from_raw::Tensor(input), from_raw::string_view(algorithm), from_raw::bool_t(use_cutlass))); + LANTERN_FUNCTION_END +} + +void* _lantern__sparse_semi_structured_apply_tensor_tensor(void* input, void* thread_masks) +{ + LANTERN_FUNCTION_START + return make_raw::tuple(torch::_sparse_semi_structured_apply( + from_raw::Tensor(input), from_raw::Tensor(thread_masks))); + LANTERN_FUNCTION_END +} + +void* _lantern__sparse_semi_structured_apply_dense_tensor_tensor(void* input, void* thread_masks) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_sparse_semi_structured_apply_dense( + from_raw::Tensor(input), from_raw::Tensor(thread_masks))); + LANTERN_FUNCTION_END +} + +void* _lantern__sparse_semi_structured_linear_tensor_tensor_tensor_tensor_cstringview_scalartype(void* input, void* weight, void* meta, void* bias, void* activation, void* out_dtype) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_sparse_semi_structured_linear( + from_raw::Tensor(input), from_raw::Tensor(weight), from_raw::Tensor(meta), from_raw::optional::Tensor(bias), from_raw::optional::string_view(activation), from_raw::optional::ScalarType(out_dtype))); + LANTERN_FUNCTION_END +} + +void* _lantern__sparse_semi_structured_mm_tensor_tensor_tensor_scalartype(void* mat1, void* mat1_meta, void* mat2, void* out_dtype) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_sparse_semi_structured_mm( + from_raw::Tensor(mat1), from_raw::Tensor(mat1_meta), from_raw::Tensor(mat2), from_raw::optional::ScalarType(out_dtype))); + LANTERN_FUNCTION_END +} + +void* _lantern__sparse_semi_structured_addmm_tensor_tensor_tensor_tensor_scalar_scalar_scalartype(void* input, void* mat1, void* mat1_meta, void* mat2, void* alpha, void* beta, void* out_dtype) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_sparse_semi_structured_addmm( + from_raw::Tensor(input), from_raw::Tensor(mat1), from_raw::Tensor(mat1_meta), from_raw::Tensor(mat2), from_raw::Scalar(alpha), from_raw::Scalar(beta), from_raw::optional::ScalarType(out_dtype))); + LANTERN_FUNCTION_END +} + +void* _lantern__mixed_dtypes_linear_tensor_tensor_tensor_tensor_cstringview(void* input, void* weight, void* scale, void* bias, void* activation) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_mixed_dtypes_linear( + from_raw::Tensor(input), from_raw::Tensor(weight), from_raw::Tensor(scale), from_raw::optional::Tensor(bias), from_raw::optional::string_view(activation))); + LANTERN_FUNCTION_END +} + void* _lantern_fbgemm_linear_int8_weight_fp32_activation_tensor_tensor_tensor_tensor_scalar_scalar_tensor(void* input, void* weight, void* packed, void* col_offsets, void* weight_scale, void* weight_zero_point, void* bias) { LANTERN_FUNCTION_START @@ -5991,6 +6276,22 @@ void* _lantern_fbgemm_pack_gemm_matrix_fp16_tensor(void* input) LANTERN_FUNCTION_END } +void* _lantern__wrapped_linear_prepack_tensor_tensor_tensor_tensor(void* weight, void* weight_scale, void* weight_zero_point, void* bias) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_wrapped_linear_prepack( + from_raw::Tensor(weight), from_raw::Tensor(weight_scale), from_raw::Tensor(weight_zero_point), from_raw::Tensor(bias))); + LANTERN_FUNCTION_END +} + +void* _lantern__wrapped_quantized_linear_prepacked_tensor_tensor_tensor_tensor_tensor_tensor_intt(void* input, void* input_scale, void* input_zero_point, void* packed_weight, void* output_scale, void* output_zero_point, void* out_channel) +{ + LANTERN_FUNCTION_START + return 
make_raw::Tensor(torch::_wrapped_quantized_linear_prepacked( + from_raw::Tensor(input), from_raw::Tensor(input_scale), from_raw::Tensor(input_zero_point), from_raw::Tensor(packed_weight), from_raw::Tensor(output_scale), from_raw::Tensor(output_zero_point), from_raw::int64_t(out_channel))); + LANTERN_FUNCTION_END +} + void* _lantern_fbgemm_linear_fp16_weight_fp32_activation_tensor_tensor_tensor(void* input, void* packed_weight, void* bias) { LANTERN_FUNCTION_START @@ -6071,6 +6372,30 @@ void* _lantern_linspace_scalar_scalar_intt_tensoroptions(void* start, void* end, LANTERN_FUNCTION_END } +void* _lantern_linspace_tensor_tensor_intt_tensoroptions(void* start, void* end, void* steps, void* options) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::linspace( + from_raw::Tensor(start), from_raw::Tensor(end), from_raw::int64_t(steps), from_raw::TensorOptions(options))); + LANTERN_FUNCTION_END +} + +void* _lantern_linspace_tensor_scalar_intt_tensoroptions(void* start, void* end, void* steps, void* options) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::linspace( + from_raw::Tensor(start), from_raw::Scalar(end), from_raw::int64_t(steps), from_raw::TensorOptions(options))); + LANTERN_FUNCTION_END +} + +void* _lantern_linspace_scalar_tensor_intt_tensoroptions(void* start, void* end, void* steps, void* options) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::linspace( + from_raw::Scalar(start), from_raw::Tensor(end), from_raw::int64_t(steps), from_raw::TensorOptions(options))); + LANTERN_FUNCTION_END +} + void* _lantern_linspace_out_tensor_scalar_scalar_intt(void* out, void* start, void* end, void* steps) { LANTERN_FUNCTION_START @@ -6079,6 +6404,30 @@ void* _lantern_linspace_out_tensor_scalar_scalar_intt(void* out, void* start, vo LANTERN_FUNCTION_END } +void* _lantern_linspace_out_tensor_tensor_tensor_intt(void* out, void* start, void* end, void* steps) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::linspace_out( + from_raw::Tensor(out), from_raw::Tensor(start), from_raw::Tensor(end), from_raw::int64_t(steps))); + LANTERN_FUNCTION_END +} + +void* _lantern_linspace_out_tensor_tensor_scalar_intt(void* out, void* start, void* end, void* steps) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::linspace_out( + from_raw::Tensor(out), from_raw::Tensor(start), from_raw::Scalar(end), from_raw::int64_t(steps))); + LANTERN_FUNCTION_END +} + +void* _lantern_linspace_out_tensor_scalar_tensor_intt(void* out, void* start, void* end, void* steps) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::linspace_out( + from_raw::Tensor(out), from_raw::Scalar(start), from_raw::Tensor(end), from_raw::int64_t(steps))); + LANTERN_FUNCTION_END +} + void* _lantern_log_tensor(void* self) { LANTERN_FUNCTION_START @@ -6391,15 +6740,63 @@ void* _lantern_logspace_scalar_scalar_intt_double_tensoroptions(void* start, voi LANTERN_FUNCTION_END } -void* _lantern_logspace_out_tensor_scalar_scalar_intt_double(void* out, void* start, void* end, void* steps, void* base) +void* _lantern_logspace_tensor_tensor_intt_double_tensoroptions(void* start, void* end, void* steps, void* base, void* options) { LANTERN_FUNCTION_START - return make_raw::Tensor(torch::logspace_out( - from_raw::Tensor(out), from_raw::Scalar(start), from_raw::Scalar(end), from_raw::int64_t(steps), from_raw::double_t(base))); + return make_raw::Tensor(torch::logspace( + from_raw::Tensor(start), from_raw::Tensor(end), from_raw::int64_t(steps), from_raw::double_t(base), from_raw::TensorOptions(options))); 
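// [editorial note] Every generated binding in lantern.cpp follows the same
// C-ABI wrapper pattern seen above: opaque void* handles are unwrapped with
// from_raw::*, the corresponding libtorch call is made, and the result is
// re-wrapped with make_raw::* so it can cross the shared-library boundary.
// A hand-written sketch of the shape follows; the function name is
// hypothetical, and the LANTERN_FUNCTION_START/END macros (defined in
// utils.hpp) are assumed to expand to a try/catch that records the C++
// exception and returns NULL instead of letting it unwind across the C
// boundary:
//
//   void* _lantern_example_add_tensor_tensor(void* self, void* other)
//   {
//     LANTERN_FUNCTION_START            // assumed: opens try { ... }
//     return make_raw::Tensor(torch::add(
//         from_raw::Tensor(self), from_raw::Tensor(other)));
//     LANTERN_FUNCTION_END              // assumed: } catch (...) { record; return NULL; }
//   }
//
// Keeping the wrappers this mechanical is what lets them be regenerated from
// the ATen operator declarations whenever libtorch's signatures change, as
// this patch does for the optional IntArrayRef and Scalar-correction
// overloads below.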
LANTERN_FUNCTION_END } -void* _lantern_log_softmax_tensor_intt_scalartype(void* self, void* dim, void* dtype) +void* _lantern_logspace_tensor_scalar_intt_double_tensoroptions(void* start, void* end, void* steps, void* base, void* options) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::logspace( + from_raw::Tensor(start), from_raw::Scalar(end), from_raw::int64_t(steps), from_raw::double_t(base), from_raw::TensorOptions(options))); + LANTERN_FUNCTION_END +} + +void* _lantern_logspace_scalar_tensor_intt_double_tensoroptions(void* start, void* end, void* steps, void* base, void* options) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::logspace( + from_raw::Scalar(start), from_raw::Tensor(end), from_raw::int64_t(steps), from_raw::double_t(base), from_raw::TensorOptions(options))); + LANTERN_FUNCTION_END +} + +void* _lantern_logspace_out_tensor_scalar_scalar_intt_double(void* out, void* start, void* end, void* steps, void* base) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::logspace_out( + from_raw::Tensor(out), from_raw::Scalar(start), from_raw::Scalar(end), from_raw::int64_t(steps), from_raw::double_t(base))); + LANTERN_FUNCTION_END +} + +void* _lantern_logspace_out_tensor_tensor_tensor_intt_double(void* out, void* start, void* end, void* steps, void* base) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::logspace_out( + from_raw::Tensor(out), from_raw::Tensor(start), from_raw::Tensor(end), from_raw::int64_t(steps), from_raw::double_t(base))); + LANTERN_FUNCTION_END +} + +void* _lantern_logspace_out_tensor_tensor_scalar_intt_double(void* out, void* start, void* end, void* steps, void* base) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::logspace_out( + from_raw::Tensor(out), from_raw::Tensor(start), from_raw::Scalar(end), from_raw::int64_t(steps), from_raw::double_t(base))); + LANTERN_FUNCTION_END +} + +void* _lantern_logspace_out_tensor_scalar_tensor_intt_double(void* out, void* start, void* end, void* steps, void* base) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::logspace_out( + from_raw::Tensor(out), from_raw::Scalar(start), from_raw::Tensor(end), from_raw::int64_t(steps), from_raw::double_t(base))); + LANTERN_FUNCTION_END +} + +void* _lantern_log_softmax_tensor_intt_scalartype(void* self, void* dim, void* dtype) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::log_softmax( @@ -6887,6 +7284,14 @@ void* _lantern_quantized_max_pool2d_tensor_intarrayref_intarrayref_intarrayref_i LANTERN_FUNCTION_END } +void* _lantern_quantized_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::quantized_max_pool3d( + from_raw::Tensor(self), from_raw::IntArrayRef(kernel_size), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(dilation), from_raw::bool_t(ceil_mode))); + LANTERN_FUNCTION_END +} + void* _lantern_max_pool3d_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) { LANTERN_FUNCTION_START @@ -6911,11 +7316,19 @@ void* _lantern_Tensor_mean_tensor_scalartype(void* self, void* dtype) LANTERN_FUNCTION_END } +void* _lantern_mean_out_tensor_tensor_scalartype(void* out, void* self, void* dtype) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::mean_out( + from_raw::Tensor(out), from_raw::Tensor(self), 
from_raw::optional::ScalarType(dtype))); + LANTERN_FUNCTION_END +} + void* _lantern_mean_tensor_intarrayref_bool_scalartype(void* self, void* dim, void* keepdim, void* dtype) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::mean( - from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); + from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); LANTERN_FUNCTION_END } @@ -6923,7 +7336,7 @@ void* _lantern_Tensor_mean_tensor_intarrayref_bool_scalartype(void* self, void* { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).mean( - from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); + from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); LANTERN_FUNCTION_END } @@ -6931,7 +7344,7 @@ void* _lantern_mean_out_tensor_tensor_intarrayref_bool_scalartype(void* out, voi { LANTERN_FUNCTION_START return make_raw::Tensor(torch::mean_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); LANTERN_FUNCTION_END } @@ -6963,7 +7376,7 @@ void* _lantern_nanmean_tensor_intarrayref_bool_scalartype(void* self, void* dim, { LANTERN_FUNCTION_START return make_raw::Tensor(torch::nanmean( - from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); + from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); LANTERN_FUNCTION_END } @@ -6971,7 +7384,7 @@ void* _lantern_Tensor_nanmean_tensor_intarrayref_bool_scalartype(void* self, voi { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).nanmean( - from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); + from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); LANTERN_FUNCTION_END } @@ -6979,7 +7392,7 @@ void* _lantern_nanmean_out_tensor_tensor_intarrayref_bool_scalartype(void* out, { LANTERN_FUNCTION_START return make_raw::Tensor(torch::nanmean_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); LANTERN_FUNCTION_END } @@ -7319,6 +7732,46 @@ void* _lantern_mm_out_tensor_tensor_tensor(void* out, void* self, void* mat2) LANTERN_FUNCTION_END } +void* _lantern__int_mm_tensor_tensor(void* self, void* mat2) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_int_mm( + from_raw::Tensor(self), from_raw::Tensor(mat2))); + LANTERN_FUNCTION_END +} + +void* _lantern__int_mm_out_tensor_tensor_tensor(void* out, void* self, void* mat2) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_int_mm_out( + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Tensor(mat2))); + LANTERN_FUNCTION_END +} + +void* _lantern__convert_weight_to_int4pack_tensor_intt(void* self, void* innerKTiles) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_convert_weight_to_int4pack( + 
from_raw::Tensor(self), from_raw::int64_t(innerKTiles))); + LANTERN_FUNCTION_END +} + +void* _lantern__weight_int4pack_mm_tensor_tensor_intt_tensor(void* self, void* mat2, void* qGroupSize, void* qScaleAndZeros) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_weight_int4pack_mm( + from_raw::Tensor(self), from_raw::Tensor(mat2), from_raw::int64_t(qGroupSize), from_raw::Tensor(qScaleAndZeros))); + LANTERN_FUNCTION_END +} + +void* _lantern__weight_int8pack_mm_tensor_tensor_tensor(void* self, void* mat2, void* scales) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_weight_int8pack_mm( + from_raw::Tensor(self), from_raw::Tensor(mat2), from_raw::Tensor(scales))); + LANTERN_FUNCTION_END +} + void* _lantern__sparse_mm_tensor_tensor(void* sparse, void* dense) { LANTERN_FUNCTION_START @@ -7639,6 +8092,14 @@ void* _lantern__native_batch_norm_legit_tensor_tensor_tensor_tensor_tensor_bool_ LANTERN_FUNCTION_END } +void* _lantern__native_batch_norm_legit_no_training_tensor_tensor_tensor_tensor_tensor_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) +{ + LANTERN_FUNCTION_START + return make_raw::tuple(torch::_native_batch_norm_legit_no_training( + from_raw::Tensor(input), from_raw::optional::Tensor(weight), from_raw::optional::Tensor(bias), from_raw::Tensor(running_mean), from_raw::Tensor(running_var), from_raw::double_t(momentum), from_raw::double_t(eps))); + LANTERN_FUNCTION_END +} + void* _lantern__native_batch_norm_legit_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_double(void* out, void* save_mean, void* save_invstd, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* training, void* momentum, void* eps) { LANTERN_FUNCTION_START @@ -7719,11 +8180,11 @@ void* _lantern_batch_norm_backward_reduce_tensor_tensor_tensor_tensor_tensor_boo LANTERN_FUNCTION_END } -void* _lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* grad_out, void* input, void* mean, void* invstd, void* weight, void* mean_dy, void* mean_dy_xmu, void* count) +void* _lantern_batch_norm_backward_elemt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* grad_out, void* input, void* mean, void* invstd, void* weight, void* sum_dy, void* sum_dy_xmu, void* count) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::batch_norm_backward_elemt( - from_raw::Tensor(grad_out), from_raw::Tensor(input), from_raw::Tensor(mean), from_raw::Tensor(invstd), from_raw::optional::Tensor(weight), from_raw::Tensor(mean_dy), from_raw::Tensor(mean_dy_xmu), from_raw::Tensor(count))); + from_raw::Tensor(grad_out), from_raw::Tensor(input), from_raw::Tensor(mean), from_raw::Tensor(invstd), from_raw::optional::Tensor(weight), from_raw::Tensor(sum_dy), from_raw::Tensor(sum_dy_xmu), from_raw::Tensor(count))); LANTERN_FUNCTION_END } @@ -9399,6 +9860,22 @@ void* _lantern_slice_backward_tensor_intarrayref_intt_intt_intt_intt(void* grad_ LANTERN_FUNCTION_END } +void* _lantern_slice_inverse_tensor_tensor_intt_intt_intt_intt(void* self, void* src, void* dim, void* start, void* end, void* step) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::slice_inverse( + from_raw::Tensor(self), from_raw::Tensor(src), from_raw::int64_t(dim), from_raw::optional::int64_t(start), from_raw::optional::int64_t(end), from_raw::int64_t(step))); + LANTERN_FUNCTION_END +} + +void* _lantern_Tensor_slice_inverse_tensor_tensor_intt_intt_intt_intt(void* self, void* src, void* dim, 
void* start, void* end, void* step) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(from_raw::Tensor(self).slice_inverse( + from_raw::Tensor(src), from_raw::int64_t(dim), from_raw::optional::int64_t(start), from_raw::optional::int64_t(end), from_raw::int64_t(step))); + LANTERN_FUNCTION_END +} + void* _lantern_slice_scatter_tensor_tensor_intt_intt_intt_intt(void* self, void* src, void* dim, void* start, void* end, void* step) { LANTERN_FUNCTION_START @@ -9847,6 +10324,22 @@ void* _lantern_sspaddmm_out_tensor_tensor_tensor_tensor_scalar_scalar(void* out, LANTERN_FUNCTION_END } +void* _lantern__chunk_cat_tensorlist_intt_intt(void* tensors, void* dim, void* num_chunks) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_chunk_cat( + from_raw::TensorList(tensors), from_raw::int64_t(dim), from_raw::int64_t(num_chunks))); + LANTERN_FUNCTION_END +} + +void* _lantern__chunk_cat_out_tensor_tensorlist_intt_intt(void* out, void* tensors, void* dim, void* num_chunks) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_chunk_cat_out( + from_raw::Tensor(out), from_raw::TensorList(tensors), from_raw::int64_t(dim), from_raw::int64_t(num_chunks))); + LANTERN_FUNCTION_END +} + void* _lantern_stack_tensorlist_intt(void* tensors, void* dim) { LANTERN_FUNCTION_START @@ -10027,7 +10520,7 @@ void* _lantern_sum_tensor_intarrayref_bool_scalartype(void* self, void* dim, voi { LANTERN_FUNCTION_START return make_raw::Tensor(torch::sum( - from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); + from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); LANTERN_FUNCTION_END } @@ -10035,7 +10528,7 @@ void* _lantern_Tensor_sum_tensor_intarrayref_bool_scalartype(void* self, void* d { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).sum( - from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); + from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); LANTERN_FUNCTION_END } @@ -10059,7 +10552,7 @@ void* _lantern_sum_out_tensor_tensor_intarrayref_bool_scalartype(void* out, void { LANTERN_FUNCTION_START return make_raw::Tensor(torch::sum_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); LANTERN_FUNCTION_END } @@ -10075,7 +10568,7 @@ void* _lantern__nested_sum_backward_tensor_tensor_intarrayref_bool(void* grad, v { LANTERN_FUNCTION_START return make_raw::Tensor(torch::_nested_sum_backward( - from_raw::Tensor(grad), from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim))); + from_raw::Tensor(grad), from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -10083,7 +10576,7 @@ void* _lantern_nansum_tensor_intarrayref_bool_scalartype(void* self, void* dim, { LANTERN_FUNCTION_START return make_raw::Tensor(torch::nansum( - from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); + from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); LANTERN_FUNCTION_END } @@ -10091,7 +10584,7 @@ void* 
_lantern_Tensor_nansum_tensor_intarrayref_bool_scalartype(void* self, void { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).nansum( - from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); + from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); LANTERN_FUNCTION_END } @@ -10099,7 +10592,7 @@ void* _lantern_nansum_out_tensor_tensor_intarrayref_bool_scalartype(void* out, v { LANTERN_FUNCTION_START return make_raw::Tensor(torch::nansum_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype))); LANTERN_FUNCTION_END } @@ -10211,7 +10704,7 @@ void* _lantern_std_tensor_intarrayref_bool_bool(void* self, void* dim, void* unb { LANTERN_FUNCTION_START return make_raw::Tensor(torch::std( - from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); + from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -10219,23 +10712,23 @@ void* _lantern_Tensor_std_tensor_intarrayref_bool_bool(void* self, void* dim, vo { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).std( - from_raw::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); + from_raw::optional::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } -void* _lantern_std_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) +void* _lantern_std_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::std( - from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } -void* _lantern_Tensor_std_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) +void* _lantern_Tensor_std_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).std( - from_raw::IntArrayRef(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::optional::IntArrayRef(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -10251,15 +10744,15 @@ void* _lantern_std_mean_tensor_intarrayref_bool_bool(void* self, void* dim, void { LANTERN_FUNCTION_START return make_raw::tuple(torch::std_mean( - from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); + from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } -void* _lantern_std_mean_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) +void* _lantern_std_mean_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::tuple(torch::std_mean( - from_raw::Tensor(self), from_raw::IntArrayRef(dim), 
from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -10271,11 +10764,11 @@ void* _lantern_std_mean_tensor_dimnamelist_bool_bool(void* self, void* dim, void LANTERN_FUNCTION_END } -void* _lantern_std_mean_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) +void* _lantern_std_mean_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::tuple(torch::std_mean( - from_raw::Tensor(self), from_raw::DimnameList(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::Tensor(self), from_raw::DimnameList(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -10283,15 +10776,15 @@ void* _lantern_std_out_tensor_tensor_intarrayref_bool_bool(void* out, void* self { LANTERN_FUNCTION_START return make_raw::Tensor(torch::std_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } -void* _lantern_std_out_tensor_tensor_intarrayref_intt_bool(void* out, void* self, void* dim, void* correction, void* keepdim) +void* _lantern_std_out_tensor_tensor_intarrayref_scalar_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::std_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -10319,27 +10812,27 @@ void* _lantern_std_out_tensor_tensor_dimnamelist_bool_bool(void* out, void* self LANTERN_FUNCTION_END } -void* _lantern_std_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) +void* _lantern_std_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::std( - from_raw::Tensor(self), from_raw::DimnameList(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::Tensor(self), from_raw::DimnameList(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } -void* _lantern_Tensor_std_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) +void* _lantern_Tensor_std_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).std( - from_raw::DimnameList(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::DimnameList(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } -void* _lantern_std_out_tensor_tensor_dimnamelist_intt_bool(void* out, void* self, void* dim, void* correction, void* keepdim) +void* _lantern_std_out_tensor_tensor_dimnamelist_scalar_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::std_out( - 
from_raw::Tensor(out), from_raw::Tensor(self), from_raw::DimnameList(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::DimnameList(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -10807,10 +11300,10 @@ void* _lantern_Tensor__nested_tensor_strides_tensor(void* self) LANTERN_FUNCTION_END } -void* _lantern_Tensor__nested_tensor_offsets_tensor(void* self) +void* _lantern_Tensor__nested_tensor_storage_offsets_tensor(void* self) { LANTERN_FUNCTION_START - return make_raw::IntArrayRef(from_raw::Tensor(self)._nested_tensor_offsets( + return make_raw::Tensor(from_raw::Tensor(self)._nested_tensor_storage_offsets( )); LANTERN_FUNCTION_END } @@ -10823,19 +11316,107 @@ void* _lantern__nested_from_padded_and_nested_example_tensor_tensor(void* padded LANTERN_FUNCTION_END } -void* _lantern__nested_view_from_buffer_tensor_tensor_tensor_intarrayref(void* self, void* nested_size, void* nested_strides, void* offsets) +void* _lantern__nested_view_from_buffer_tensor_tensor_tensor_tensor(void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::_nested_view_from_buffer( - from_raw::Tensor(self), from_raw::Tensor(nested_size), from_raw::Tensor(nested_strides), from_raw::IntArrayRef(offsets))); + from_raw::Tensor(self), from_raw::Tensor(nested_size), from_raw::Tensor(nested_strides), from_raw::Tensor(offsets))); LANTERN_FUNCTION_END } -void* _lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_intarrayref(void* self, void* nested_size, void* nested_strides, void* offsets) +void* _lantern__nested_view_from_buffer_copy_tensor_tensor_tensor_tensor(void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::_nested_view_from_buffer_copy( - from_raw::Tensor(self), from_raw::Tensor(nested_size), from_raw::Tensor(nested_strides), from_raw::IntArrayRef(offsets))); + from_raw::Tensor(self), from_raw::Tensor(nested_size), from_raw::Tensor(nested_strides), from_raw::Tensor(offsets))); + LANTERN_FUNCTION_END +} + +void* _lantern__nested_view_from_jagged_tensor_tensor_tensor_tensor_intt_tensor_tensor(void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_nested_view_from_jagged( + from_raw::Tensor(self), from_raw::Tensor(offsets), from_raw::Tensor(dummy), from_raw::optional::Tensor(lengths), from_raw::int64_t(ragged_idx), from_raw::optional::Tensor(min_seqlen), from_raw::optional::Tensor(max_seqlen))); + LANTERN_FUNCTION_END +} + +void* _lantern__nested_view_from_jagged_copy_tensor_tensor_tensor_tensor_intt_tensor_tensor(void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_nested_view_from_jagged_copy( + from_raw::Tensor(self), from_raw::Tensor(offsets), from_raw::Tensor(dummy), from_raw::optional::Tensor(lengths), from_raw::int64_t(ragged_idx), from_raw::optional::Tensor(min_seqlen), from_raw::optional::Tensor(max_seqlen))); + LANTERN_FUNCTION_END +} + +void* _lantern__nested_get_values_tensor(void* self) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_nested_get_values( + from_raw::Tensor(self))); + LANTERN_FUNCTION_END +} + +void* _lantern__nested_get_values_copy_tensor(void* self) +{ + LANTERN_FUNCTION_START + 
return make_raw::Tensor(torch::_nested_get_values_copy( + from_raw::Tensor(self))); + LANTERN_FUNCTION_END +} + +void* _lantern__nested_get_offsets_tensor(void* self) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_nested_get_offsets( + from_raw::Tensor(self))); + LANTERN_FUNCTION_END +} + +void* _lantern__nested_get_lengths_tensor(void* self) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_nested_get_lengths( + from_raw::Tensor(self))); + LANTERN_FUNCTION_END +} + +void* _lantern__nested_get_ragged_idx_tensor(void* self) +{ + LANTERN_FUNCTION_START + return make_raw::int64_t(torch::_nested_get_ragged_idx( + from_raw::Tensor(self))); + LANTERN_FUNCTION_END +} + +void* _lantern__nested_get_min_seqlen_tensor(void* self) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_nested_get_min_seqlen( + from_raw::Tensor(self))); + LANTERN_FUNCTION_END +} + +void* _lantern__nested_get_max_seqlen_tensor(void* self) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_nested_get_max_seqlen( + from_raw::Tensor(self))); + LANTERN_FUNCTION_END +} + +void* _lantern__nested_get_jagged_dummy_tensor(void* any) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_nested_get_jagged_dummy( + from_raw::Tensor(any))); + LANTERN_FUNCTION_END +} + +void* _lantern__nested_compute_contiguous_strides_offsets_tensor(void* nested_size) +{ + LANTERN_FUNCTION_START + return make_raw::tuple(torch::_nested_compute_contiguous_strides_offsets( + from_raw::Tensor(nested_size))); LANTERN_FUNCTION_END } @@ -11051,7 +11632,7 @@ void* _lantern_var_tensor_intarrayref_bool_bool(void* self, void* dim, void* unb { LANTERN_FUNCTION_START return make_raw::Tensor(torch::var( - from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); + from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -11059,23 +11640,23 @@ void* _lantern_Tensor_var_tensor_intarrayref_bool_bool(void* self, void* dim, vo { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).var( - from_raw::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); + from_raw::optional::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } -void* _lantern_var_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) +void* _lantern_var_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::var( - from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } -void* _lantern_Tensor_var_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) +void* _lantern_Tensor_var_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).var( - from_raw::IntArrayRef(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::optional::IntArrayRef(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -11083,15 +11664,15 @@ void* _lantern_var_out_tensor_tensor_intarrayref_bool_bool(void* out, void* self { 
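  // NOTE: the variance/std shims in this section track two upstream libtorch
  // signature changes: `dim` became an optional IntArrayRef and `correction`
  // became an optional Scalar (so fractional Bessel corrections are
  // representable), replacing the old int64_t form. A hypothetical call,
  // assuming libtorch >= 2.0 headers:
  //
  //   #include <torch/torch.h>
  //   auto t = torch::randn({4, 5});
  //   auto v = torch::var(t, /*dim=*/{1},
  //                       /*correction=*/c10::Scalar(1.5),
  //                       /*keepdim=*/true);  // v has shape [4, 1]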
LANTERN_FUNCTION_START return make_raw::Tensor(torch::var_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } -void* _lantern_var_out_tensor_tensor_intarrayref_intt_bool(void* out, void* self, void* dim, void* correction, void* keepdim) +void* _lantern_var_out_tensor_tensor_intarrayref_scalar_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::var_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -11119,27 +11700,27 @@ void* _lantern_var_out_tensor_tensor_dimnamelist_bool_bool(void* out, void* self LANTERN_FUNCTION_END } -void* _lantern_var_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) +void* _lantern_var_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::var( - from_raw::Tensor(self), from_raw::DimnameList(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::Tensor(self), from_raw::DimnameList(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } -void* _lantern_Tensor_var_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) +void* _lantern_Tensor_var_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).var( - from_raw::DimnameList(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::DimnameList(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } -void* _lantern_var_out_tensor_tensor_dimnamelist_intt_bool(void* out, void* self, void* dim, void* correction, void* keepdim) +void* _lantern_var_out_tensor_tensor_dimnamelist_scalar_bool(void* out, void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::var_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::DimnameList(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::DimnameList(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -11155,15 +11736,15 @@ void* _lantern_var_mean_tensor_intarrayref_bool_bool(void* self, void* dim, void { LANTERN_FUNCTION_START return make_raw::tuple(torch::var_mean( - from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); + from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(unbiased), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } -void* _lantern_var_mean_tensor_intarrayref_intt_bool(void* self, void* dim, void* correction, void* keepdim) +void* _lantern_var_mean_tensor_intarrayref_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return 
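  // NOTE: var_mean returns a (variance, mean) pair, boxed here via
  // make_raw::tuple. Hypothetical usage, assuming libtorch >= 2.0:
  //
  //   auto [v, m] = torch::var_mean(t, /*dim=*/{0},
  //                                 /*correction=*/c10::Scalar(1),
  //                                 /*keepdim=*/false);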
make_raw::tuple(torch::var_mean( - from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -11175,11 +11756,11 @@ void* _lantern_var_mean_tensor_dimnamelist_bool_bool(void* self, void* dim, void LANTERN_FUNCTION_END } -void* _lantern_var_mean_tensor_dimnamelist_intt_bool(void* self, void* dim, void* correction, void* keepdim) +void* _lantern_var_mean_tensor_dimnamelist_scalar_bool(void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::tuple(torch::var_mean( - from_raw::Tensor(self), from_raw::DimnameList(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::Tensor(self), from_raw::DimnameList(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -11399,6 +11980,38 @@ void* _lantern_native_norm_tensor_scalar_intarrayref_bool_scalartype(void* self, LANTERN_FUNCTION_END } +void* _lantern__batch_norm_with_update_tensor_tensor_tensor_tensor_tensor_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) +{ + LANTERN_FUNCTION_START + return make_raw::tuple(torch::_batch_norm_with_update( + from_raw::Tensor(input), from_raw::optional::Tensor(weight), from_raw::optional::Tensor(bias), from_raw::Tensor(running_mean), from_raw::Tensor(running_var), from_raw::double_t(momentum), from_raw::double_t(eps))); + LANTERN_FUNCTION_END +} + +void* _lantern__batch_norm_with_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(void* out, void* save_mean, void* save_invstd, void* reserve, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) +{ + LANTERN_FUNCTION_START + return make_raw::tuple(torch::_batch_norm_with_update_out( + from_raw::Tensor(out), from_raw::Tensor(save_mean), from_raw::Tensor(save_invstd), from_raw::Tensor(reserve), from_raw::Tensor(input), from_raw::optional::Tensor(weight), from_raw::optional::Tensor(bias), from_raw::Tensor(running_mean), from_raw::Tensor(running_var), from_raw::double_t(momentum), from_raw::double_t(eps))); + LANTERN_FUNCTION_END +} + +void* _lantern__batch_norm_no_update_tensor_tensor_tensor_tensor_tensor_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) +{ + LANTERN_FUNCTION_START + return make_raw::tuple(torch::_batch_norm_no_update( + from_raw::Tensor(input), from_raw::optional::Tensor(weight), from_raw::optional::Tensor(bias), from_raw::optional::Tensor(running_mean), from_raw::optional::Tensor(running_var), from_raw::double_t(momentum), from_raw::double_t(eps))); + LANTERN_FUNCTION_END +} + +void* _lantern_batch_norm_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_double_stdarraybool_tensor(void* grad_out, void* input, void* weight, void* running_mean, void* running_var, void* save_mean, void* save_var, void* update, void* eps, void* output_mask, void* reserve) +{ + LANTERN_FUNCTION_START + return make_raw::tuple(torch::batch_norm_backward( + from_raw::Tensor(grad_out), from_raw::Tensor(input), from_raw::Tensor(weight), from_raw::optional::Tensor(running_mean), from_raw::optional::Tensor(running_var), from_raw::optional::Tensor(save_mean), from_raw::optional::Tensor(save_var), 
from_raw::bool_t(update), from_raw::double_t(eps), from_raw::vector::bool_t(output_mask), from_raw::Tensor(reserve))); + LANTERN_FUNCTION_END +} + void* _lantern__sparse_sum_tensor(void* self) { LANTERN_FUNCTION_START @@ -12063,6 +12676,30 @@ void* _lantern_Tensor__addmm_activation_tensor_tensor_tensor_scalar_scalar_bool( LANTERN_FUNCTION_END } +void* _lantern__scaled_mm_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool(void* self, void* mat2, void* scale_a, void* scale_b, void* bias, void* scale_result, void* out_dtype, void* use_fast_accum) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_scaled_mm( + from_raw::Tensor(self), from_raw::Tensor(mat2), from_raw::Tensor(scale_a), from_raw::Tensor(scale_b), from_raw::optional::Tensor(bias), from_raw::optional::Tensor(scale_result), from_raw::optional::ScalarType(out_dtype), from_raw::bool_t(use_fast_accum))); + LANTERN_FUNCTION_END +} + +void* _lantern__scaled_mm_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_scalartype_bool(void* out, void* self, void* mat2, void* scale_a, void* scale_b, void* bias, void* scale_result, void* out_dtype, void* use_fast_accum) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_scaled_mm_out( + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Tensor(mat2), from_raw::Tensor(scale_a), from_raw::Tensor(scale_b), from_raw::optional::Tensor(bias), from_raw::optional::Tensor(scale_result), from_raw::optional::ScalarType(out_dtype), from_raw::bool_t(use_fast_accum))); + LANTERN_FUNCTION_END +} + +void* _lantern__sparse_compressed_tensor_with_dims_intt_intt_intarrayref_intarrayref_scalartype_tensoroptions(void* nnz, void* dense_dim, void* size, void* blocksize, void* index_dtype, void* options) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_sparse_compressed_tensor_with_dims( + from_raw::int64_t(nnz), from_raw::int64_t(dense_dim), from_raw::IntArrayRef(size), from_raw::IntArrayRef(blocksize), from_raw::ScalarType(index_dtype), from_raw::TensorOptions(options))); + LANTERN_FUNCTION_END +} + void* _lantern_sparse_compressed_tensor_tensor_tensor_tensor_intarrayref_tensoroptions(void* compressed_indices, void* plain_indices, void* values, void* size, void* options) { LANTERN_FUNCTION_START @@ -12191,34 +12828,34 @@ void* _lantern_sparse_coo_tensor_intarrayref_tensoroptions(void* size, void* opt LANTERN_FUNCTION_END } -void* _lantern_sparse_coo_tensor_tensor_tensor_tensoroptions(void* indices, void* values, void* options) +void* _lantern_sparse_coo_tensor_tensor_tensor_tensoroptions_bool(void* indices, void* values, void* options, void* is_coalesced) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::sparse_coo_tensor( - from_raw::Tensor(indices), from_raw::Tensor(values), from_raw::TensorOptions(options))); + from_raw::Tensor(indices), from_raw::Tensor(values), from_raw::TensorOptions(options), from_raw::optional::bool_t(is_coalesced))); LANTERN_FUNCTION_END } -void* _lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions(void* indices, void* values, void* size, void* options) +void* _lantern_sparse_coo_tensor_tensor_tensor_intarrayref_tensoroptions_bool(void* indices, void* values, void* size, void* options, void* is_coalesced) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::sparse_coo_tensor( - from_raw::Tensor(indices), from_raw::Tensor(values), from_raw::IntArrayRef(size), from_raw::TensorOptions(options))); + from_raw::Tensor(indices), from_raw::Tensor(values), from_raw::IntArrayRef(size), from_raw::TensorOptions(options), 
from_raw::optional::bool_t(is_coalesced))); LANTERN_FUNCTION_END } -void* _lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions(void* indices, void* values, void* size, void* options) +void* _lantern__sparse_coo_tensor_unsafe_tensor_tensor_intarrayref_tensoroptions_bool(void* indices, void* values, void* size, void* options, void* is_coalesced) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::_sparse_coo_tensor_unsafe( - from_raw::Tensor(indices), from_raw::Tensor(values), from_raw::IntArrayRef(size), from_raw::TensorOptions(options))); + from_raw::Tensor(indices), from_raw::Tensor(values), from_raw::IntArrayRef(size), from_raw::TensorOptions(options), from_raw::optional::bool_t(is_coalesced))); LANTERN_FUNCTION_END } -void* _lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref(void* indices, void* values, void* size) +void* _lantern__validate_sparse_coo_tensor_args_tensor_tensor_intarrayref_bool(void* indices, void* values, void* size, void* is_coalesced) { LANTERN_FUNCTION_START - torch::_validate_sparse_coo_tensor_args(from_raw::Tensor(indices), from_raw::Tensor(values), from_raw::IntArrayRef(size)); + torch::_validate_sparse_coo_tensor_args(from_raw::Tensor(indices), from_raw::Tensor(values), from_raw::IntArrayRef(size), from_raw::optional::bool_t(is_coalesced)); return NULL; LANTERN_FUNCTION_END } @@ -12271,11 +12908,11 @@ void* _lantern__sparse_coo_tensor_with_dims_intt_intt_intarrayref_tensoroptions( LANTERN_FUNCTION_END } -void* _lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions(void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* options) +void* _lantern__sparse_coo_tensor_with_dims_and_tensors_intt_intt_intarrayref_tensor_tensor_tensoroptions_bool(void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* options, void* is_coalesced) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::_sparse_coo_tensor_with_dims_and_tensors( - from_raw::int64_t(sparse_dim), from_raw::int64_t(dense_dim), from_raw::IntArrayRef(size), from_raw::Tensor(indices), from_raw::Tensor(values), from_raw::TensorOptions(options))); + from_raw::int64_t(sparse_dim), from_raw::int64_t(dense_dim), from_raw::IntArrayRef(size), from_raw::Tensor(indices), from_raw::Tensor(values), from_raw::TensorOptions(options), from_raw::optional::bool_t(is_coalesced))); LANTERN_FUNCTION_END } @@ -12303,6 +12940,14 @@ void* _lantern_Tensor_sparse_mask_tensor_tensor(void* self, void* mask) LANTERN_FUNCTION_END } +void* _lantern_Tensor__sparse_mask_projection_tensor_tensor_bool(void* self, void* mask, void* accumulate_matches) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(from_raw::Tensor(self)._sparse_mask_projection( + from_raw::Tensor(mask), from_raw::bool_t(accumulate_matches))); + LANTERN_FUNCTION_END +} + void* _lantern__to_cpu_tensorlist(void* tensors) { LANTERN_FUNCTION_START @@ -12311,27 +12956,27 @@ void* _lantern__to_cpu_tensorlist(void* tensors) LANTERN_FUNCTION_END } -void* _lantern_Tensor_to_dense_tensor_scalartype(void* self, void* dtype) +void* _lantern_Tensor_to_dense_tensor_scalartype_bool(void* self, void* dtype, void* masked_grad) { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).to_dense( - from_raw::optional::ScalarType(dtype))); + from_raw::optional::ScalarType(dtype), from_raw::optional::bool_t(masked_grad))); LANTERN_FUNCTION_END } -void* _lantern_Tensor__to_dense_tensor_scalartype(void* self, void* dtype) +void* 
_lantern_Tensor__to_dense_tensor_scalartype_bool(void* self, void* dtype, void* masked_grad) { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self)._to_dense( - from_raw::optional::ScalarType(dtype))); + from_raw::optional::ScalarType(dtype), from_raw::optional::bool_t(masked_grad))); LANTERN_FUNCTION_END } -void* _lantern_to_dense_backward_tensor_tensor(void* grad, void* input) +void* _lantern_to_dense_backward_tensor_tensor_bool(void* grad, void* input, void* masked_grad) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::to_dense_backward( - from_raw::Tensor(grad), from_raw::Tensor(input))); + from_raw::Tensor(grad), from_raw::Tensor(input), from_raw::optional::bool_t(masked_grad))); LANTERN_FUNCTION_END } @@ -12535,11 +13180,27 @@ void* _lantern_Tensor_to_sparse_tensor_intt(void* self, void* sparse_dim) LANTERN_FUNCTION_END } +void* _lantern_Tensor__to_sparse_tensor_intt(void* self, void* sparse_dim) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(from_raw::Tensor(self)._to_sparse( + from_raw::int64_t(sparse_dim))); + LANTERN_FUNCTION_END +} + void* _lantern_Tensor_to_sparse_tensor_layout_intarrayref_intt(void* self, void* layout, void* blocksize, void* dense_dim) { LANTERN_FUNCTION_START return make_raw::Tensor(from_raw::Tensor(self).to_sparse( - from_raw::optional::Layout(layout), from_raw::IntArrayRef(blocksize), from_raw::optional::int64_t(dense_dim))); + from_raw::optional::Layout(layout), from_raw::optional::IntArrayRef(blocksize), from_raw::optional::int64_t(dense_dim))); + LANTERN_FUNCTION_END +} + +void* _lantern_Tensor__to_sparse_tensor_layout_intarrayref_intt(void* self, void* layout, void* blocksize, void* dense_dim) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(from_raw::Tensor(self)._to_sparse( + from_raw::optional::Layout(layout), from_raw::optional::IntArrayRef(blocksize), from_raw::optional::int64_t(dense_dim))); LANTERN_FUNCTION_END } @@ -12551,6 +13212,14 @@ void* _lantern_Tensor_to_sparse_csr_tensor_intt(void* self, void* dense_dim) LANTERN_FUNCTION_END } +void* _lantern_Tensor__to_sparse_csr_tensor_intt(void* self, void* dense_dim) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(from_raw::Tensor(self)._to_sparse_csr( + from_raw::optional::int64_t(dense_dim))); + LANTERN_FUNCTION_END +} + void* _lantern_Tensor_to_sparse_csc_tensor_intt(void* self, void* dense_dim) { LANTERN_FUNCTION_START @@ -12559,6 +13228,14 @@ void* _lantern_Tensor_to_sparse_csc_tensor_intt(void* self, void* dense_dim) LANTERN_FUNCTION_END } +void* _lantern_Tensor__to_sparse_csc_tensor_intt(void* self, void* dense_dim) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(from_raw::Tensor(self)._to_sparse_csc( + from_raw::optional::int64_t(dense_dim))); + LANTERN_FUNCTION_END +} + void* _lantern_Tensor_to_sparse_bsr_tensor_intarrayref_intt(void* self, void* blocksize, void* dense_dim) { LANTERN_FUNCTION_START @@ -12567,39 +13244,63 @@ void* _lantern_Tensor_to_sparse_bsr_tensor_intarrayref_intt(void* self, void* bl LANTERN_FUNCTION_END } -void* _lantern_Tensor_to_sparse_bsc_tensor_intarrayref_intt(void* self, void* blocksize, void* dense_dim) +void* _lantern_Tensor__to_sparse_bsr_tensor_intarrayref_intt(void* self, void* blocksize, void* dense_dim) { LANTERN_FUNCTION_START - return make_raw::Tensor(from_raw::Tensor(self).to_sparse_bsc( + return make_raw::Tensor(from_raw::Tensor(self)._to_sparse_bsr( from_raw::IntArrayRef(blocksize), from_raw::optional::int64_t(dense_dim))); LANTERN_FUNCTION_END } -void* _lantern_Tensor_to_mkldnn_tensor_scalartype(void* 
self, void* dtype) +void* _lantern_Tensor_to_sparse_bsc_tensor_intarrayref_intt(void* self, void* blocksize, void* dense_dim) { LANTERN_FUNCTION_START - return make_raw::Tensor(from_raw::Tensor(self).to_mkldnn( - from_raw::optional::ScalarType(dtype))); + return make_raw::Tensor(from_raw::Tensor(self).to_sparse_bsc( + from_raw::IntArrayRef(blocksize), from_raw::optional::int64_t(dense_dim))); LANTERN_FUNCTION_END } -void* _lantern_mkldnn_reorder_conv2d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size) +void* _lantern_Tensor__to_sparse_bsc_tensor_intarrayref_intt(void* self, void* blocksize, void* dense_dim) { LANTERN_FUNCTION_START - return make_raw::Tensor(torch::mkldnn_reorder_conv2d_weight( - from_raw::Tensor(self), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(dilation), from_raw::int64_t(groups), from_raw::IntArrayRef(input_size))); + return make_raw::Tensor(from_raw::Tensor(self)._to_sparse_bsc( + from_raw::IntArrayRef(blocksize), from_raw::optional::int64_t(dense_dim))); LANTERN_FUNCTION_END } -void* _lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt(void* self, void* padding, void* stride, void* dilation, void* groups) +void* _lantern__to_sparse_semi_structured_tensor(void* dense) { LANTERN_FUNCTION_START - return make_raw::Tensor(torch::mkldnn_reorder_conv3d_weight( - from_raw::Tensor(self), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(dilation), from_raw::int64_t(groups))); + return make_raw::tuple(torch::_to_sparse_semi_structured( + from_raw::Tensor(dense))); LANTERN_FUNCTION_END } -void* _lantern_to_mkldnn_backward_tensor_tensor(void* grad, void* input) +void* _lantern_Tensor_to_mkldnn_tensor_scalartype(void* self, void* dtype) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(from_raw::Tensor(self).to_mkldnn( + from_raw::optional::ScalarType(dtype))); + LANTERN_FUNCTION_END +} + +void* _lantern_mkldnn_reorder_conv2d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::mkldnn_reorder_conv2d_weight( + from_raw::Tensor(self), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(dilation), from_raw::int64_t(groups), from_raw::optional::IntArrayRef(input_size))); + LANTERN_FUNCTION_END +} + +void* _lantern_mkldnn_reorder_conv3d_weight_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::mkldnn_reorder_conv3d_weight( + from_raw::Tensor(self), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(dilation), from_raw::int64_t(groups), from_raw::optional::IntArrayRef(input_size))); + LANTERN_FUNCTION_END +} + +void* _lantern_to_mkldnn_backward_tensor_tensor(void* grad, void* input) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::to_mkldnn_backward( @@ -13055,11 +13756,11 @@ void* _lantern_result_type_scalar_scalar(void* scalar1, void* scalar2) LANTERN_FUNCTION_END } -void* _lantern_can_cast_scalartype_scalartype(void* from, void* to) +void* _lantern_can_cast_scalartype_scalartype(void* from_, void* to) { LANTERN_FUNCTION_START return make_raw::bool_t(torch::can_cast( - 
from_raw::ScalarType(from), from_raw::ScalarType(to))); + from_raw::ScalarType(from_), from_raw::ScalarType(to))); LANTERN_FUNCTION_END } @@ -13091,7 +13792,7 @@ void* _lantern_lstm_mps_backward_tensor_tensor_tensor_tensor_tensor_tensor_tenso { LANTERN_FUNCTION_START return make_raw::tuple(torch::lstm_mps_backward( - from_raw::Tensor(grad_y), from_raw::optional::Tensor(grad_hy), from_raw::optional::Tensor(grad_cy), from_raw::Tensor(z_state), from_raw::Tensor(cell_state_fwd), from_raw::Tensor(input), from_raw::Tensor(layersOutputs), from_raw::TensorList(hx), from_raw::TensorList(params), from_raw::bool_t(has_biases), from_raw::int64_t(num_layers), from_raw::double_t(dropout), from_raw::bool_t(train), from_raw::bool_t(bidirectional), from_raw::bool_t(batch_first))); + from_raw::optional::Tensor(grad_y), from_raw::optional::Tensor(grad_hy), from_raw::optional::Tensor(grad_cy), from_raw::Tensor(z_state), from_raw::Tensor(cell_state_fwd), from_raw::Tensor(input), from_raw::Tensor(layersOutputs), from_raw::TensorList(hx), from_raw::TensorList(params), from_raw::bool_t(has_biases), from_raw::int64_t(num_layers), from_raw::double_t(dropout), from_raw::bool_t(train), from_raw::bool_t(bidirectional), from_raw::bool_t(batch_first))); LANTERN_FUNCTION_END } @@ -13447,6 +14148,14 @@ void* _lantern_Tensor_masked_scatter_tensor_tensor_tensor(void* self, void* mask LANTERN_FUNCTION_END } +void* _lantern_masked_scatter_backward_tensor_tensor_intarrayref(void* grad_output, void* mask, void* sizes) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::masked_scatter_backward( + from_raw::Tensor(grad_output), from_raw::Tensor(mask), from_raw::IntArrayRef(sizes))); + LANTERN_FUNCTION_END +} + void* _lantern__masked_softmax_tensor_tensor_intt_intt(void* self, void* mask, void* dim, void* mask_type) { LANTERN_FUNCTION_START @@ -15655,6 +16364,30 @@ void* _lantern_Tensor_nonzero_tensor(void* self) LANTERN_FUNCTION_END } +void* _lantern_nonzero_static_out_tensor_tensor_intt_intt(void* out, void* self, void* size, void* fill_value) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::nonzero_static_out( + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::int64_t(size), from_raw::int64_t(fill_value))); + LANTERN_FUNCTION_END +} + +void* _lantern_nonzero_static_tensor_intt_intt(void* self, void* size, void* fill_value) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::nonzero_static( + from_raw::Tensor(self), from_raw::int64_t(size), from_raw::int64_t(fill_value))); + LANTERN_FUNCTION_END +} + +void* _lantern_Tensor_nonzero_static_tensor_intt_intt(void* self, void* size, void* fill_value) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(from_raw::Tensor(self).nonzero_static( + from_raw::int64_t(size), from_raw::int64_t(fill_value))); + LANTERN_FUNCTION_END +} + void* _lantern_nonzero_numpy_tensor(void* self) { LANTERN_FUNCTION_START @@ -16927,6 +17660,14 @@ void* _lantern_Tensor_min_tensor(void* self) LANTERN_FUNCTION_END } +void* _lantern_min_out_tensor_tensor(void* out, void* self) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::min_out( + from_raw::Tensor(out), from_raw::Tensor(self))); + LANTERN_FUNCTION_END +} + void* _lantern_fmin_tensor_tensor(void* self, void* other) { LANTERN_FUNCTION_START @@ -17343,6 +18084,14 @@ void* _lantern_Tensor_argsort_tensor_bool_intt_bool(void* self, void* stable, vo LANTERN_FUNCTION_END } +void* _lantern_argsort_out_tensor_tensor_bool_intt_bool(void* out, void* self, void* stable, void* dim, void* descending) +{ + 
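  // NOTE: the new nonzero_static entry points above differ from nonzero() by
  // returning a fixed number of rows, padding (or truncating) with
  // fill_value so the output shape is static. Hypothetical usage, assuming
  // libtorch >= 2.1:
  //
  //   auto t   = torch::tensor({0, 3, 0, 7});
  //   auto idx = torch::nonzero_static(t, /*size=*/4, /*fill_value=*/-1);
  //   // idx is [[1], [3], [-1], [-1]]: two real hits, two padded rows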
LANTERN_FUNCTION_START + return make_raw::Tensor(torch::argsort_out( + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::bool_t(stable), from_raw::int64_t(dim), from_raw::bool_t(descending))); + LANTERN_FUNCTION_END +} + void* _lantern_argsort_tensor_dimname_bool(void* self, void* dim, void* descending) { LANTERN_FUNCTION_START @@ -17751,306 +18500,306 @@ void* _lantern__foreach_add__tensorlist_scalar(void* self, void* scalar) LANTERN_FUNCTION_END } -void* _lantern__foreach_sub_tensorlist_scalar(void* self, void* scalar) +void* _lantern__foreach_add_tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_sub( - from_raw::TensorList(self), from_raw::Scalar(scalar))); + return make_raw::TensorList(torch::_foreach_add( + from_raw::TensorList(self), from_raw::TensorList(other), from_raw::Scalar(alpha))); LANTERN_FUNCTION_END } -void* _lantern__foreach_sub__tensorlist_scalar(void* self, void* scalar) +void* _lantern__foreach_add__tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) { LANTERN_FUNCTION_START - torch::_foreach_sub_(from_raw::TensorList(self), from_raw::Scalar(scalar)); + torch::_foreach_add_(from_raw::TensorList(self), from_raw::TensorList(other), from_raw::Scalar(alpha)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_mul_tensorlist_scalar(void* self, void* scalar) +void* _lantern__foreach_add_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_mul( - from_raw::TensorList(self), from_raw::Scalar(scalar))); + return make_raw::TensorList(torch::_foreach_add( + from_raw::TensorList(self), from_raw::vector::Scalar(scalars))); LANTERN_FUNCTION_END } -void* _lantern__foreach_mul__tensorlist_scalar(void* self, void* scalar) +void* _lantern__foreach_add__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_mul_(from_raw::TensorList(self), from_raw::Scalar(scalar)); + torch::_foreach_add_(from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_div_tensorlist_scalar(void* self, void* scalar) +void* _lantern__foreach_add_tensorlist_tensor_scalar(void* self, void* other, void* alpha) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_div( - from_raw::TensorList(self), from_raw::Scalar(scalar))); + return make_raw::TensorList(torch::_foreach_add( + from_raw::TensorList(self), from_raw::Tensor(other), from_raw::Scalar(alpha))); LANTERN_FUNCTION_END } -void* _lantern__foreach_div__tensorlist_scalar(void* self, void* scalar) +void* _lantern__foreach_add__tensorlist_tensor_scalar(void* self, void* other, void* alpha) { LANTERN_FUNCTION_START - torch::_foreach_div_(from_raw::TensorList(self), from_raw::Scalar(scalar)); + torch::_foreach_add_(from_raw::TensorList(self), from_raw::Tensor(other), from_raw::Scalar(alpha)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_min_tensorlist_scalar(void* self, void* scalar) +void* _lantern__foreach_sub_tensorlist_scalar(void* self, void* scalar) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_clamp_min( + return make_raw::TensorList(torch::_foreach_sub( from_raw::TensorList(self), from_raw::Scalar(scalar))); LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_min__tensorlist_scalar(void* self, void* scalar) +void* _lantern__foreach_sub__tensorlist_scalar(void* self, void* scalar) { 
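  // NOTE: the _foreach_* shims below are reordered and extended to cover the
  // full overload set: one shared Scalar, a per-tensor list of Scalars
  // (arrayrefscalar), a matching TensorList, and now a single Tensor operand.
  // These back fused optimizer updates that step many parameters at once.
  // Hypothetical usage, assuming libtorch >= 2.0:
  //
  //   std::vector<torch::Tensor> xs = {torch::ones({2}), torch::ones({3})};
  //   auto ys = torch::_foreach_mul(xs, 2.0);       // one Scalar for every tensor
  //   torch::_foreach_add_(xs, ys, /*alpha=*/1.0);  // in-place, list + list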
LANTERN_FUNCTION_START - torch::_foreach_clamp_min_(from_raw::TensorList(self), from_raw::Scalar(scalar)); + torch::_foreach_sub_(from_raw::TensorList(self), from_raw::Scalar(scalar)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_max_tensorlist_scalar(void* self, void* scalar) +void* _lantern__foreach_sub_tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_clamp_max( - from_raw::TensorList(self), from_raw::Scalar(scalar))); + return make_raw::TensorList(torch::_foreach_sub( + from_raw::TensorList(self), from_raw::TensorList(other), from_raw::Scalar(alpha))); LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_max__tensorlist_scalar(void* self, void* scalar) +void* _lantern__foreach_sub__tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) { LANTERN_FUNCTION_START - torch::_foreach_clamp_max_(from_raw::TensorList(self), from_raw::Scalar(scalar)); + torch::_foreach_sub_(from_raw::TensorList(self), from_raw::TensorList(other), from_raw::Scalar(alpha)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_maximum_tensorlist_scalar(void* self, void* scalar) +void* _lantern__foreach_sub_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_maximum( - from_raw::TensorList(self), from_raw::Scalar(scalar))); + return make_raw::TensorList(torch::_foreach_sub( + from_raw::TensorList(self), from_raw::vector::Scalar(scalars))); LANTERN_FUNCTION_END } -void* _lantern__foreach_maximum__tensorlist_scalar(void* self, void* scalar) +void* _lantern__foreach_sub__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_maximum_(from_raw::TensorList(self), from_raw::Scalar(scalar)); + torch::_foreach_sub_(from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_minimum_tensorlist_scalar(void* self, void* scalar) +void* _lantern__foreach_mul_tensorlist_scalar(void* self, void* scalar) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_minimum( + return make_raw::TensorList(torch::_foreach_mul( from_raw::TensorList(self), from_raw::Scalar(scalar))); LANTERN_FUNCTION_END } -void* _lantern__foreach_minimum__tensorlist_scalar(void* self, void* scalar) +void* _lantern__foreach_mul__tensorlist_scalar(void* self, void* scalar) { LANTERN_FUNCTION_START - torch::_foreach_minimum_(from_raw::TensorList(self), from_raw::Scalar(scalar)); + torch::_foreach_mul_(from_raw::TensorList(self), from_raw::Scalar(scalar)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_add_tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) +void* _lantern__foreach_mul_tensorlist_tensorlist(void* self, void* other) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_add( - from_raw::TensorList(self), from_raw::TensorList(other), from_raw::Scalar(alpha))); + return make_raw::TensorList(torch::_foreach_mul( + from_raw::TensorList(self), from_raw::TensorList(other))); LANTERN_FUNCTION_END } -void* _lantern__foreach_add__tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) +void* _lantern__foreach_mul__tensorlist_tensorlist(void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_add_(from_raw::TensorList(self), from_raw::TensorList(other), from_raw::Scalar(alpha)); + torch::_foreach_mul_(from_raw::TensorList(self), from_raw::TensorList(other)); return NULL; 
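  // NOTE: in-place _foreach_*_ wrappers like the one above return NULL:
  // the underlying libtorch call mutates the input TensorList and returns
  // void, so there is nothing to box for the caller, e.g.
  //
  //   torch::_foreach_mul_(xs, ys);  // mutates each xs[i] in place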
LANTERN_FUNCTION_END } -void* _lantern__foreach_sub_tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) +void* _lantern__foreach_mul_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_sub( - from_raw::TensorList(self), from_raw::TensorList(other), from_raw::Scalar(alpha))); + return make_raw::TensorList(torch::_foreach_mul( + from_raw::TensorList(self), from_raw::vector::Scalar(scalars))); LANTERN_FUNCTION_END } -void* _lantern__foreach_sub__tensorlist_tensorlist_scalar(void* self, void* other, void* alpha) +void* _lantern__foreach_mul__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_sub_(from_raw::TensorList(self), from_raw::TensorList(other), from_raw::Scalar(alpha)); + torch::_foreach_mul_(from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_mul_tensorlist_tensorlist(void* self, void* other) +void* _lantern__foreach_mul_tensorlist_tensor(void* self, void* other) { LANTERN_FUNCTION_START return make_raw::TensorList(torch::_foreach_mul( - from_raw::TensorList(self), from_raw::TensorList(other))); + from_raw::TensorList(self), from_raw::Tensor(other))); LANTERN_FUNCTION_END } -void* _lantern__foreach_mul__tensorlist_tensorlist(void* self, void* other) +void* _lantern__foreach_mul__tensorlist_tensor(void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_mul_(from_raw::TensorList(self), from_raw::TensorList(other)); + torch::_foreach_mul_(from_raw::TensorList(self), from_raw::Tensor(other)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_div_tensorlist_tensorlist(void* self, void* other) +void* _lantern__foreach_div_tensorlist_scalar(void* self, void* scalar) { LANTERN_FUNCTION_START return make_raw::TensorList(torch::_foreach_div( - from_raw::TensorList(self), from_raw::TensorList(other))); + from_raw::TensorList(self), from_raw::Scalar(scalar))); LANTERN_FUNCTION_END } -void* _lantern__foreach_div__tensorlist_tensorlist(void* self, void* other) +void* _lantern__foreach_div__tensorlist_scalar(void* self, void* scalar) { LANTERN_FUNCTION_START - torch::_foreach_div_(from_raw::TensorList(self), from_raw::TensorList(other)); + torch::_foreach_div_(from_raw::TensorList(self), from_raw::Scalar(scalar)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_min_tensorlist_tensorlist(void* self, void* other) +void* _lantern__foreach_div_tensorlist_tensorlist(void* self, void* other) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_clamp_min( + return make_raw::TensorList(torch::_foreach_div( from_raw::TensorList(self), from_raw::TensorList(other))); LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_min__tensorlist_tensorlist(void* self, void* other) +void* _lantern__foreach_div__tensorlist_tensorlist(void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_clamp_min_(from_raw::TensorList(self), from_raw::TensorList(other)); + torch::_foreach_div_(from_raw::TensorList(self), from_raw::TensorList(other)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_max_tensorlist_tensorlist(void* self, void* other) +void* _lantern__foreach_div_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_clamp_max( - from_raw::TensorList(self), from_raw::TensorList(other))); + return make_raw::TensorList(torch::_foreach_div( + 
from_raw::TensorList(self), from_raw::vector::Scalar(scalars))); LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_max__tensorlist_tensorlist(void* self, void* other) +void* _lantern__foreach_div__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_clamp_max_(from_raw::TensorList(self), from_raw::TensorList(other)); + torch::_foreach_div_(from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_maximum_tensorlist_tensorlist(void* self, void* other) +void* _lantern__foreach_div_tensorlist_tensor(void* self, void* other) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_maximum( - from_raw::TensorList(self), from_raw::TensorList(other))); + return make_raw::TensorList(torch::_foreach_div( + from_raw::TensorList(self), from_raw::Tensor(other))); LANTERN_FUNCTION_END } -void* _lantern__foreach_maximum__tensorlist_tensorlist(void* self, void* other) +void* _lantern__foreach_div__tensorlist_tensor(void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_maximum_(from_raw::TensorList(self), from_raw::TensorList(other)); + torch::_foreach_div_(from_raw::TensorList(self), from_raw::Tensor(other)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_minimum_tensorlist_tensorlist(void* self, void* other) +void* _lantern__foreach_clamp_max_tensorlist_scalar(void* self, void* scalar) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_minimum( - from_raw::TensorList(self), from_raw::TensorList(other))); + return make_raw::TensorList(torch::_foreach_clamp_max( + from_raw::TensorList(self), from_raw::Scalar(scalar))); LANTERN_FUNCTION_END } -void* _lantern__foreach_minimum__tensorlist_tensorlist(void* self, void* other) +void* _lantern__foreach_clamp_max__tensorlist_scalar(void* self, void* scalar) { LANTERN_FUNCTION_START - torch::_foreach_minimum_(from_raw::TensorList(self), from_raw::TensorList(other)); + torch::_foreach_clamp_max_(from_raw::TensorList(self), from_raw::Scalar(scalar)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_add_tensorlist_arrayrefscalar(void* self, void* scalars) +void* _lantern__foreach_clamp_max_tensorlist_tensorlist(void* self, void* other) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_add( - from_raw::TensorList(self), from_raw::vector::Scalar(scalars))); + return make_raw::TensorList(torch::_foreach_clamp_max( + from_raw::TensorList(self), from_raw::TensorList(other))); LANTERN_FUNCTION_END } -void* _lantern__foreach_add__tensorlist_arrayrefscalar(void* self, void* scalars) +void* _lantern__foreach_clamp_max__tensorlist_tensorlist(void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_add_(from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + torch::_foreach_clamp_max_(from_raw::TensorList(self), from_raw::TensorList(other)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_sub_tensorlist_arrayrefscalar(void* self, void* scalars) +void* _lantern__foreach_clamp_max_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_sub( + return make_raw::TensorList(torch::_foreach_clamp_max( from_raw::TensorList(self), from_raw::vector::Scalar(scalars))); LANTERN_FUNCTION_END } -void* _lantern__foreach_sub__tensorlist_arrayrefscalar(void* self, void* scalars) +void* _lantern__foreach_clamp_max__tensorlist_arrayrefscalar(void* self, void* scalars) { 
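  // NOTE: _foreach_clamp_min / _foreach_clamp_max apply elementwise bounds
  // across a whole TensorList; _foreach_maximum / _foreach_minimum (further
  // down) compute the same pointwise extrema under different names. A
  // hypothetical ReLU-style clamp over a list, assuming libtorch >= 2.0:
  //
  //   auto relu_like = torch::_foreach_clamp_min(xs, 0.0);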
LANTERN_FUNCTION_START - torch::_foreach_sub_(from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + torch::_foreach_clamp_max_(from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_div_tensorlist_arrayrefscalar(void* self, void* scalars) +void* _lantern__foreach_clamp_min_tensorlist_scalar(void* self, void* scalar) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_div( - from_raw::TensorList(self), from_raw::vector::Scalar(scalars))); + return make_raw::TensorList(torch::_foreach_clamp_min( + from_raw::TensorList(self), from_raw::Scalar(scalar))); LANTERN_FUNCTION_END } -void* _lantern__foreach_div__tensorlist_arrayrefscalar(void* self, void* scalars) +void* _lantern__foreach_clamp_min__tensorlist_scalar(void* self, void* scalar) { LANTERN_FUNCTION_START - torch::_foreach_div_(from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + torch::_foreach_clamp_min_(from_raw::TensorList(self), from_raw::Scalar(scalar)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_mul_tensorlist_arrayrefscalar(void* self, void* scalars) +void* _lantern__foreach_clamp_min_tensorlist_tensorlist(void* self, void* other) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_mul( - from_raw::TensorList(self), from_raw::vector::Scalar(scalars))); + return make_raw::TensorList(torch::_foreach_clamp_min( + from_raw::TensorList(self), from_raw::TensorList(other))); LANTERN_FUNCTION_END } -void* _lantern__foreach_mul__tensorlist_arrayrefscalar(void* self, void* scalars) +void* _lantern__foreach_clamp_min__tensorlist_tensorlist(void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_mul_(from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + torch::_foreach_clamp_min_(from_raw::TensorList(self), from_raw::TensorList(other)); return NULL; LANTERN_FUNCTION_END } @@ -18071,90 +18820,194 @@ void* _lantern__foreach_clamp_min__tensorlist_arrayrefscalar(void* self, void* s LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_max_tensorlist_arrayrefscalar(void* self, void* scalars) +void* _lantern__foreach_maximum_tensorlist_scalar(void* self, void* scalar) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_clamp_max( - from_raw::TensorList(self), from_raw::vector::Scalar(scalars))); + return make_raw::TensorList(torch::_foreach_maximum( + from_raw::TensorList(self), from_raw::Scalar(scalar))); LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_max__tensorlist_arrayrefscalar(void* self, void* scalars) +void* _lantern__foreach_maximum__tensorlist_scalar(void* self, void* scalar) { LANTERN_FUNCTION_START - torch::_foreach_clamp_max_(from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + torch::_foreach_maximum_(from_raw::TensorList(self), from_raw::Scalar(scalar)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_maximum_tensorlist_arrayrefscalar(void* self, void* scalars) +void* _lantern__foreach_maximum_tensorlist_tensorlist(void* self, void* other) { LANTERN_FUNCTION_START return make_raw::TensorList(torch::_foreach_maximum( - from_raw::TensorList(self), from_raw::vector::Scalar(scalars))); + from_raw::TensorList(self), from_raw::TensorList(other))); LANTERN_FUNCTION_END } -void* _lantern__foreach_maximum__tensorlist_arrayrefscalar(void* self, void* scalars) +void* _lantern__foreach_maximum__tensorlist_tensorlist(void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_maximum_(from_raw::TensorList(self), 
from_raw::vector::Scalar(scalars)); + torch::_foreach_maximum_(from_raw::TensorList(self), from_raw::TensorList(other)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_minimum_tensorlist_arrayrefscalar(void* self, void* scalars) +void* _lantern__foreach_maximum_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_minimum( + return make_raw::TensorList(torch::_foreach_maximum( from_raw::TensorList(self), from_raw::vector::Scalar(scalars))); LANTERN_FUNCTION_END } -void* _lantern__foreach_minimum__tensorlist_arrayrefscalar(void* self, void* scalars) +void* _lantern__foreach_maximum__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_minimum_(from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + torch::_foreach_maximum_(from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_exp_tensorlist(void* self) +void* _lantern__foreach_minimum_tensorlist_scalar(void* self, void* scalar) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_exp( - from_raw::TensorList(self))); + return make_raw::TensorList(torch::_foreach_minimum( + from_raw::TensorList(self), from_raw::Scalar(scalar))); LANTERN_FUNCTION_END } -void* _lantern__foreach_zero__tensorlist(void* self) +void* _lantern__foreach_minimum__tensorlist_scalar(void* self, void* scalar) { LANTERN_FUNCTION_START - torch::_foreach_zero_(from_raw::TensorList(self)); + torch::_foreach_minimum_(from_raw::TensorList(self), from_raw::Scalar(scalar)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_exp__tensorlist(void* self) +void* _lantern__foreach_minimum_tensorlist_tensorlist(void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_exp_(from_raw::TensorList(self)); + return make_raw::TensorList(torch::_foreach_minimum( + from_raw::TensorList(self), from_raw::TensorList(other))); + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_minimum__tensorlist_tensorlist(void* self, void* other) +{ + LANTERN_FUNCTION_START + torch::_foreach_minimum_(from_raw::TensorList(self), from_raw::TensorList(other)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_sqrt_tensorlist(void* self) +void* _lantern__foreach_minimum_tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_sqrt( - from_raw::TensorList(self))); + return make_raw::TensorList(torch::_foreach_minimum( + from_raw::TensorList(self), from_raw::vector::Scalar(scalars))); LANTERN_FUNCTION_END } -void* _lantern__foreach_sqrt__tensorlist(void* self) +void* _lantern__foreach_minimum__tensorlist_arrayrefscalar(void* self, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_sqrt_(from_raw::TensorList(self)); + torch::_foreach_minimum_(from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) +{ + LANTERN_FUNCTION_START + return make_raw::TensorList(torch::_foreach_addcdiv( + from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Scalar(value))); + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) +{ + LANTERN_FUNCTION_START + return 
make_raw::TensorList(torch::_foreach_addcdiv( + from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::vector::Scalar(scalars))); + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) +{ + LANTERN_FUNCTION_START + return make_raw::TensorList(torch::_foreach_addcdiv( + from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Tensor(scalars))); + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) +{ + LANTERN_FUNCTION_START + torch::_foreach_addcdiv_(from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Scalar(value)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) +{ + LANTERN_FUNCTION_START + torch::_foreach_addcdiv_(from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::vector::Scalar(scalars)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) +{ + LANTERN_FUNCTION_START + torch::_foreach_addcdiv_(from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Tensor(scalars)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) +{ + LANTERN_FUNCTION_START + return make_raw::TensorList(torch::_foreach_addcmul( + from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Scalar(value))); + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) +{ + LANTERN_FUNCTION_START + return make_raw::TensorList(torch::_foreach_addcmul( + from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::vector::Scalar(scalars))); + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) +{ + LANTERN_FUNCTION_START + return make_raw::TensorList(torch::_foreach_addcmul( + from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Tensor(scalars))); + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value) +{ + LANTERN_FUNCTION_START + torch::_foreach_addcmul_(from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Scalar(value)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars) +{ + LANTERN_FUNCTION_START + torch::_foreach_addcmul_(from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::vector::Scalar(scalars)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars) 
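  // NOTE: _foreach_addcdiv / _foreach_addcmul fuse self[i] += value *
  // (t1[i] / t2[i]) and self[i] += value * (t1[i] * t2[i]) across lists;
  // the `scalars` Tensor overload lets each list element take its own
  // weight. A hypothetical Adam-style parameter step with assumed list
  // names (params, exp_avgs, denoms), assuming libtorch >= 2.0:
  //
  //   torch::_foreach_addcdiv_(params, exp_avgs, denoms, /*value=*/-1e-3);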
+{ + LANTERN_FUNCTION_START + torch::_foreach_addcmul_(from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Tensor(scalars)); return NULL; LANTERN_FUNCTION_END } @@ -18303,6 +19156,22 @@ void* _lantern__foreach_erfc__tensorlist(void* self) LANTERN_FUNCTION_END } +void* _lantern__foreach_exp_tensorlist(void* self) +{ + LANTERN_FUNCTION_START + return make_raw::TensorList(torch::_foreach_exp( + from_raw::TensorList(self))); + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_exp__tensorlist(void* self) +{ + LANTERN_FUNCTION_START + torch::_foreach_exp_(from_raw::TensorList(self)); + return NULL; + LANTERN_FUNCTION_END +} + void* _lantern__foreach_expm1_tensorlist(void* self) { LANTERN_FUNCTION_START @@ -18335,194 +19204,218 @@ void* _lantern__foreach_floor__tensorlist(void* self) LANTERN_FUNCTION_END } -void* _lantern__foreach_log_tensorlist(void* self) +void* _lantern__foreach_frac_tensorlist(void* self) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_log( + return make_raw::TensorList(torch::_foreach_frac( from_raw::TensorList(self))); LANTERN_FUNCTION_END } -void* _lantern__foreach_log__tensorlist(void* self) +void* _lantern__foreach_frac__tensorlist(void* self) { LANTERN_FUNCTION_START - torch::_foreach_log_(from_raw::TensorList(self)); + torch::_foreach_frac_(from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_log10_tensorlist(void* self) +void* _lantern__foreach_lerp_tensorlist_tensorlist_tensorlist(void* self, void* tensors1, void* weights) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_log10( - from_raw::TensorList(self))); + return make_raw::TensorList(torch::_foreach_lerp( + from_raw::TensorList(self), from_raw::TensorList(tensors1), from_raw::TensorList(weights))); LANTERN_FUNCTION_END } -void* _lantern__foreach_log10__tensorlist(void* self) +void* _lantern__foreach_lerp__tensorlist_tensorlist_tensorlist(void* self, void* tensors1, void* weights) { LANTERN_FUNCTION_START - torch::_foreach_log10_(from_raw::TensorList(self)); + torch::_foreach_lerp_(from_raw::TensorList(self), from_raw::TensorList(tensors1), from_raw::TensorList(weights)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_log1p_tensorlist(void* self) +void* _lantern__foreach_lerp_tensorlist_tensorlist_scalar(void* self, void* tensors1, void* weight) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_log1p( - from_raw::TensorList(self))); + return make_raw::TensorList(torch::_foreach_lerp( + from_raw::TensorList(self), from_raw::TensorList(tensors1), from_raw::Scalar(weight))); LANTERN_FUNCTION_END } -void* _lantern__foreach_log1p__tensorlist(void* self) +void* _lantern__foreach_lerp__tensorlist_tensorlist_scalar(void* self, void* tensors1, void* weight) { LANTERN_FUNCTION_START - torch::_foreach_log1p_(from_raw::TensorList(self)); + torch::_foreach_lerp_(from_raw::TensorList(self), from_raw::TensorList(tensors1), from_raw::Scalar(weight)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_log2_tensorlist(void* self) +void* _lantern__foreach_lgamma_tensorlist(void* self) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_log2( + return make_raw::TensorList(torch::_foreach_lgamma( from_raw::TensorList(self))); LANTERN_FUNCTION_END } -void* _lantern__foreach_log2__tensorlist(void* self) +void* _lantern__foreach_lgamma__tensorlist(void* self) { LANTERN_FUNCTION_START - torch::_foreach_log2_(from_raw::TensorList(self)); + 
torch::_foreach_lgamma_(from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_neg_tensorlist(void* self) +void* _lantern__foreach_log_tensorlist(void* self) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_neg( + return make_raw::TensorList(torch::_foreach_log( from_raw::TensorList(self))); LANTERN_FUNCTION_END } -void* _lantern__foreach_neg__tensorlist(void* self) +void* _lantern__foreach_log__tensorlist(void* self) { LANTERN_FUNCTION_START - torch::_foreach_neg_(from_raw::TensorList(self)); + torch::_foreach_log_(from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_tan_tensorlist(void* self) +void* _lantern__foreach_log10_tensorlist(void* self) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_tan( + return make_raw::TensorList(torch::_foreach_log10( from_raw::TensorList(self))); LANTERN_FUNCTION_END } -void* _lantern__foreach_tan__tensorlist(void* self) +void* _lantern__foreach_log10__tensorlist(void* self) { LANTERN_FUNCTION_START - torch::_foreach_tan_(from_raw::TensorList(self)); + torch::_foreach_log10_(from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_tanh_tensorlist(void* self) +void* _lantern__foreach_log1p_tensorlist(void* self) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_tanh( + return make_raw::TensorList(torch::_foreach_log1p( from_raw::TensorList(self))); LANTERN_FUNCTION_END } -void* _lantern__foreach_tanh__tensorlist(void* self) +void* _lantern__foreach_log1p__tensorlist(void* self) { LANTERN_FUNCTION_START - torch::_foreach_tanh_(from_raw::TensorList(self)); + torch::_foreach_log1p_(from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_sin_tensorlist(void* self) +void* _lantern__foreach_log2_tensorlist(void* self) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_sin( + return make_raw::TensorList(torch::_foreach_log2( from_raw::TensorList(self))); LANTERN_FUNCTION_END } -void* _lantern__foreach_sin__tensorlist(void* self) +void* _lantern__foreach_log2__tensorlist(void* self) { LANTERN_FUNCTION_START - torch::_foreach_sin_(from_raw::TensorList(self)); + torch::_foreach_log2_(from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_sinh_tensorlist(void* self) +void* _lantern__foreach_max_tensorlist(void* self) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_sinh( + return make_raw::TensorList(torch::_foreach_max( from_raw::TensorList(self))); LANTERN_FUNCTION_END } -void* _lantern__foreach_sinh__tensorlist(void* self) +void* _lantern__foreach_neg_tensorlist(void* self) { LANTERN_FUNCTION_START - torch::_foreach_sinh_(from_raw::TensorList(self)); + return make_raw::TensorList(torch::_foreach_neg( + from_raw::TensorList(self))); + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_neg__tensorlist(void* self) +{ + LANTERN_FUNCTION_START + torch::_foreach_neg_(from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_round_tensorlist(void* self) +void* _lantern__foreach_norm_tensorlist_scalar_scalartype(void* self, void* ord, void* dtype) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_round( - from_raw::TensorList(self))); + return make_raw::TensorList(torch::_foreach_norm( + from_raw::TensorList(self), from_raw::Scalar(ord), from_raw::optional::ScalarType(dtype))); LANTERN_FUNCTION_END } -void* 
-void* _lantern__foreach_round__tensorlist(void* self)
+void* _lantern__foreach_pow_tensorlist_tensorlist(void* self, void* exponent)
 {
   LANTERN_FUNCTION_START
-  torch::_foreach_round_(from_raw::TensorList(self));
-  return NULL;
+  return make_raw::TensorList(torch::_foreach_pow(
+    from_raw::TensorList(self), from_raw::TensorList(exponent)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_lgamma_tensorlist(void* self)
+void* _lantern__foreach_pow_tensorlist_scalar(void* self, void* exponent)
 {
   LANTERN_FUNCTION_START
-  return make_raw::TensorList(torch::_foreach_lgamma(
-    from_raw::TensorList(self)));
+  return make_raw::TensorList(torch::_foreach_pow(
+    from_raw::TensorList(self), from_raw::Scalar(exponent)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_lgamma__tensorlist(void* self)
+void* _lantern__foreach_pow_tensorlist_arrayrefscalar(void* self, void* exponent)
 {
   LANTERN_FUNCTION_START
-  torch::_foreach_lgamma_(from_raw::TensorList(self));
+  return make_raw::TensorList(torch::_foreach_pow(
+    from_raw::TensorList(self), from_raw::vector::Scalar(exponent)));
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__foreach_pow_scalar_tensorlist(void* self, void* exponent)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::TensorList(torch::_foreach_pow(
+    from_raw::Scalar(self), from_raw::TensorList(exponent)));
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__foreach_pow__tensorlist_tensorlist(void* self, void* exponent)
+{
+  LANTERN_FUNCTION_START
+  torch::_foreach_pow_(from_raw::TensorList(self), from_raw::TensorList(exponent));
   return NULL;
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_frac_tensorlist(void* self)
+void* _lantern__foreach_pow__tensorlist_scalar(void* self, void* exponent)
 {
   LANTERN_FUNCTION_START
-  return make_raw::TensorList(torch::_foreach_frac(
-    from_raw::TensorList(self)));
+  torch::_foreach_pow_(from_raw::TensorList(self), from_raw::Scalar(exponent));
+  return NULL;
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_frac__tensorlist(void* self)
+void* _lantern__foreach_pow__tensorlist_arrayrefscalar(void* self, void* exponent)
 {
   LANTERN_FUNCTION_START
-  torch::_foreach_frac_(from_raw::TensorList(self));
+  torch::_foreach_pow_(from_raw::TensorList(self), from_raw::vector::Scalar(exponent));
   return NULL;
   LANTERN_FUNCTION_END
 }
@@ -18543,171 +19436,171 @@ void* _lantern__foreach_reciprocal__tensorlist(void* self)
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_sigmoid_tensorlist(void* self)
+void* _lantern__foreach_round_tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  return make_raw::TensorList(torch::_foreach_sigmoid(
+  return make_raw::TensorList(torch::_foreach_round(
     from_raw::TensorList(self)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_sigmoid__tensorlist(void* self)
+void* _lantern__foreach_round__tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  torch::_foreach_sigmoid_(from_raw::TensorList(self));
+  torch::_foreach_round_(from_raw::TensorList(self));
   return NULL;
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_trunc_tensorlist(void* self)
+void* _lantern__foreach_sigmoid_tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  return make_raw::TensorList(torch::_foreach_trunc(
+  return make_raw::TensorList(torch::_foreach_sigmoid(
     from_raw::TensorList(self)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_trunc__tensorlist(void* self)
+void* _lantern__foreach_sigmoid__tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  torch::_foreach_trunc_(from_raw::TensorList(self));
+  torch::_foreach_sigmoid_(from_raw::TensorList(self));
   return NULL;
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value)
+void* _lantern__foreach_sign_tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  torch::_foreach_addcdiv_(from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Scalar(value));
-  return NULL;
+  return make_raw::TensorList(torch::_foreach_sign(
+    from_raw::TensorList(self)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value)
+void* _lantern__foreach_sign__tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  torch::_foreach_addcmul_(from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Scalar(value));
+  torch::_foreach_sign_(from_raw::TensorList(self));
   return NULL;
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars)
+void* _lantern__foreach_sin_tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  torch::_foreach_addcdiv_(from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::vector::Scalar(scalars));
-  return NULL;
+  return make_raw::TensorList(torch::_foreach_sin(
+    from_raw::TensorList(self)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_addcdiv__tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars)
+void* _lantern__foreach_sin__tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  torch::_foreach_addcdiv_(from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Tensor(scalars));
+  torch::_foreach_sin_(from_raw::TensorList(self));
   return NULL;
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars)
+void* _lantern__foreach_sinh_tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  torch::_foreach_addcmul_(from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::vector::Scalar(scalars));
-  return NULL;
+  return make_raw::TensorList(torch::_foreach_sinh(
+    from_raw::TensorList(self)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_addcmul__tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars)
+void* _lantern__foreach_sinh__tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  torch::_foreach_addcmul_(from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Tensor(scalars));
+  torch::_foreach_sinh_(from_raw::TensorList(self));
   return NULL;
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value)
+void* _lantern__foreach_sqrt_tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  return make_raw::TensorList(torch::_foreach_addcdiv(
-    from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Scalar(value)));
+  return make_raw::TensorList(torch::_foreach_sqrt(
+    from_raw::TensorList(self)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_scalar(void* self, void* tensor1, void* tensor2, void* value)
+void* _lantern__foreach_sqrt__tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  return make_raw::TensorList(torch::_foreach_addcmul(
-    from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Scalar(value)));
+  torch::_foreach_sqrt_(from_raw::TensorList(self));
+  return NULL;
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars)
+void* _lantern__foreach_tan_tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  return make_raw::TensorList(torch::_foreach_addcdiv(
-    from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::vector::Scalar(scalars)));
+  return make_raw::TensorList(torch::_foreach_tan(
+    from_raw::TensorList(self)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_addcdiv_tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars)
+void* _lantern__foreach_tan__tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  return make_raw::TensorList(torch::_foreach_addcdiv(
-    from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Tensor(scalars)));
+  torch::_foreach_tan_(from_raw::TensorList(self));
+  return NULL;
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* self, void* tensor1, void* tensor2, void* scalars)
+void* _lantern__foreach_tanh_tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  return make_raw::TensorList(torch::_foreach_addcmul(
-    from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::vector::Scalar(scalars)));
+  return make_raw::TensorList(torch::_foreach_tanh(
+    from_raw::TensorList(self)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_addcmul_tensorlist_tensorlist_tensorlist_tensor(void* self, void* tensor1, void* tensor2, void* scalars)
+void* _lantern__foreach_tanh__tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  return make_raw::TensorList(torch::_foreach_addcmul(
-    from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Tensor(scalars)));
+  torch::_foreach_tanh_(from_raw::TensorList(self));
+  return NULL;
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_norm_tensorlist_scalar(void* self, void* ord)
+void* _lantern__foreach_trunc_tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  return make_raw::TensorList(torch::_foreach_norm(
-    from_raw::TensorList(self), from_raw::Scalar(ord)));
+  return make_raw::TensorList(torch::_foreach_trunc(
+    from_raw::TensorList(self)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_lerp_tensorlist_tensorlist_tensorlist(void* self, void* tensors1, void* weights)
+void* _lantern__foreach_trunc__tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  return make_raw::TensorList(torch::_foreach_lerp(
-    from_raw::TensorList(self), from_raw::TensorList(tensors1), from_raw::TensorList(weights)));
+  torch::_foreach_trunc_(from_raw::TensorList(self));
+  return NULL;
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_lerp__tensorlist_tensorlist_tensorlist(void* self, void* tensors1, void* weights)
+void* _lantern__foreach_zero__tensorlist(void* self)
 {
   LANTERN_FUNCTION_START
-  torch::_foreach_lerp_(from_raw::TensorList(self), from_raw::TensorList(tensors1), from_raw::TensorList(weights));
+  torch::_foreach_zero_(from_raw::TensorList(self));
   return NULL;
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_lerp_tensorlist_tensorlist_scalar(void* self, void* tensors1, void* weight)
+void* _lantern__foreach_copy__tensorlist_tensorlist_bool(void* self, void* src, void* non_blocking)
 {
   LANTERN_FUNCTION_START
-  return make_raw::TensorList(torch::_foreach_lerp(
-    from_raw::TensorList(self), from_raw::TensorList(tensors1), from_raw::Scalar(weight)));
+  torch::_foreach_copy_(from_raw::TensorList(self), from_raw::TensorList(src), from_raw::bool_t(non_blocking));
+  return NULL;
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__foreach_lerp__tensorlist_tensorlist_scalar(void* self, void* tensors1, void* weight)
+void* _lantern__foreach_copy_tensorlist_tensorlist_bool(void* self, void* src, void* non_blocking)
 {
   LANTERN_FUNCTION_START
-  torch::_foreach_lerp_(from_raw::TensorList(self), from_raw::TensorList(tensors1), from_raw::Scalar(weight));
-  return NULL;
+  return make_raw::TensorList(torch::_foreach_copy(
+    from_raw::TensorList(self), from_raw::TensorList(src), from_raw::bool_t(non_blocking)));
   LANTERN_FUNCTION_END
 }
 
@@ -18754,8 +19647,16 @@ void* _lantern_searchsorted_out_tensor_tensor_tensor_bool_bool_cstringview_tenso
 void* _lantern_searchsorted_tensor_scalar_bool_bool_cstringview_tensor(void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter)
 {
   LANTERN_FUNCTION_START
-  return make_raw::Tensor(torch::searchsorted(
-    from_raw::Tensor(sorted_sequence), from_raw::Scalar(self), from_raw::bool_t(out_int32), from_raw::bool_t(right), from_raw::optional::string_view(side), from_raw::optional::Tensor(sorter)));
+  return make_raw::Tensor(torch::searchsorted(
+    from_raw::Tensor(sorted_sequence), from_raw::Scalar(self), from_raw::bool_t(out_int32), from_raw::bool_t(right), from_raw::optional::string_view(side), from_raw::optional::Tensor(sorter)));
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor(void* out, void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::Tensor(torch::searchsorted_out(
+    from_raw::Tensor(out), from_raw::Tensor(sorted_sequence), from_raw::Scalar(self), from_raw::bool_t(out_int32), from_raw::bool_t(right), from_raw::optional::string_view(side), from_raw::optional::Tensor(sorter)));
   LANTERN_FUNCTION_END
 }
 
@@ -20099,7 +21000,7 @@ void* _lantern_upsample_linear1d_tensor_intarrayref_bool_arrayrefdouble(void* in
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::upsample_linear1d(
-    from_raw::Tensor(input), from_raw::IntArrayRef(output_size), from_raw::bool_t(align_corners), from_raw::optional::DoubleArrayRef(scale_factors)));
+    from_raw::Tensor(input), from_raw::optional::IntArrayRef(output_size), from_raw::bool_t(align_corners), from_raw::optional::DoubleArrayRef(scale_factors)));
   LANTERN_FUNCTION_END
 }
 
@@ -20107,7 +21008,7 @@ void* _lantern_upsample_bilinear2d_tensor_intarrayref_bool_arrayrefdouble(void*
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::upsample_bilinear2d(
-    from_raw::Tensor(input), from_raw::IntArrayRef(output_size), from_raw::bool_t(align_corners), from_raw::optional::DoubleArrayRef(scale_factors)));
+    from_raw::Tensor(input), from_raw::optional::IntArrayRef(output_size), from_raw::bool_t(align_corners), from_raw::optional::DoubleArrayRef(scale_factors)));
   LANTERN_FUNCTION_END
 }
 
@@ -20115,7 +21016,7 @@ void* _lantern__upsample_bilinear2d_aa_tensor_intarrayref_bool_arrayrefdouble(vo
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::_upsample_bilinear2d_aa(
-    from_raw::Tensor(input), from_raw::IntArrayRef(output_size), from_raw::bool_t(align_corners), from_raw::optional::DoubleArrayRef(scale_factors)));
+    from_raw::Tensor(input), from_raw::optional::IntArrayRef(output_size), from_raw::bool_t(align_corners), from_raw::optional::DoubleArrayRef(scale_factors)));
   LANTERN_FUNCTION_END
 }
 
@@ -20123,7 +21024,7 @@ void* _lantern_upsample_trilinear3d_tensor_intarrayref_bool_arrayrefdouble(void*
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::upsample_trilinear3d(
-    from_raw::Tensor(input), from_raw::IntArrayRef(output_size), from_raw::bool_t(align_corners), from_raw::optional::DoubleArrayRef(scale_factors)));
+    from_raw::Tensor(input), from_raw::optional::IntArrayRef(output_size), from_raw::bool_t(align_corners), from_raw::optional::DoubleArrayRef(scale_factors)));
   LANTERN_FUNCTION_END
 }
 
@@ -20131,7 +21032,7 @@ void* _lantern_upsample_bicubic2d_tensor_intarrayref_bool_arrayrefdouble(void* i
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::upsample_bicubic2d(
-    from_raw::Tensor(input), from_raw::IntArrayRef(output_size), from_raw::bool_t(align_corners), from_raw::optional::DoubleArrayRef(scale_factors)));
+    from_raw::Tensor(input), from_raw::optional::IntArrayRef(output_size), from_raw::bool_t(align_corners), from_raw::optional::DoubleArrayRef(scale_factors)));
   LANTERN_FUNCTION_END
 }
 
@@ -20139,7 +21040,7 @@ void* _lantern__upsample_bicubic2d_aa_tensor_intarrayref_bool_arrayrefdouble(voi
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::_upsample_bicubic2d_aa(
-    from_raw::Tensor(input), from_raw::IntArrayRef(output_size), from_raw::bool_t(align_corners), from_raw::optional::DoubleArrayRef(scale_factors)));
+    from_raw::Tensor(input), from_raw::optional::IntArrayRef(output_size), from_raw::bool_t(align_corners), from_raw::optional::DoubleArrayRef(scale_factors)));
   LANTERN_FUNCTION_END
 }
 
@@ -20147,7 +21048,7 @@ void* _lantern_upsample_nearest1d_tensor_intarrayref_arrayrefdouble(void* input,
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::upsample_nearest1d(
-    from_raw::Tensor(input), from_raw::IntArrayRef(output_size), from_raw::optional::DoubleArrayRef(scale_factors)));
+    from_raw::Tensor(input), from_raw::optional::IntArrayRef(output_size), from_raw::optional::DoubleArrayRef(scale_factors)));
   LANTERN_FUNCTION_END
 }
 
@@ -20155,7 +21056,7 @@ void* _lantern__upsample_nearest_exact1d_tensor_intarrayref_arrayrefdouble(void*
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::_upsample_nearest_exact1d(
-    from_raw::Tensor(input), from_raw::IntArrayRef(output_size), from_raw::optional::DoubleArrayRef(scale_factors)));
+    from_raw::Tensor(input), from_raw::optional::IntArrayRef(output_size), from_raw::optional::DoubleArrayRef(scale_factors)));
   LANTERN_FUNCTION_END
 }
 
@@ -20163,7 +21064,7 @@ void* _lantern_upsample_nearest2d_tensor_intarrayref_arrayrefdouble(void* input,
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::upsample_nearest2d(
-    from_raw::Tensor(input), from_raw::IntArrayRef(output_size), from_raw::optional::DoubleArrayRef(scale_factors)));
+    from_raw::Tensor(input), from_raw::optional::IntArrayRef(output_size), from_raw::optional::DoubleArrayRef(scale_factors)));
   LANTERN_FUNCTION_END
 }
 
@@ -20171,7 +21072,7 @@ void* _lantern__upsample_nearest_exact2d_tensor_intarrayref_arrayrefdouble(void*
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::_upsample_nearest_exact2d(
-    from_raw::Tensor(input), from_raw::IntArrayRef(output_size), from_raw::optional::DoubleArrayRef(scale_factors)));
+    from_raw::Tensor(input), from_raw::optional::IntArrayRef(output_size), from_raw::optional::DoubleArrayRef(scale_factors)));
   LANTERN_FUNCTION_END
 }
 
@@ -20179,7 +21080,7 @@ void* _lantern_upsample_nearest3d_tensor_intarrayref_arrayrefdouble(void* input,
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::upsample_nearest3d(
-    from_raw::Tensor(input), from_raw::IntArrayRef(output_size), from_raw::optional::DoubleArrayRef(scale_factors)));
+    from_raw::Tensor(input), from_raw::optional::IntArrayRef(output_size), from_raw::optional::DoubleArrayRef(scale_factors)));
   LANTERN_FUNCTION_END
 }
 
@@ -20187,7 +21088,7 @@ void* _lantern__upsample_nearest_exact3d_tensor_intarrayref_arrayrefdouble(void*
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::_upsample_nearest_exact3d(
-    from_raw::Tensor(input), from_raw::IntArrayRef(output_size), from_raw::optional::DoubleArrayRef(scale_factors)));
+    from_raw::Tensor(input), from_raw::optional::IntArrayRef(output_size), from_raw::optional::DoubleArrayRef(scale_factors)));
   LANTERN_FUNCTION_END
 }
 
@@ -21611,7 +22512,7 @@ void* _lantern_fft_fft2_tensor_intarrayref_intarrayref_cstringview(void* self, v
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_fft2(
-    from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21619,7 +22520,7 @@ void* _lantern_fft_fft2_out_tensor_tensor_intarrayref_intarrayref_cstringview(vo
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_fft2_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21627,7 +22528,7 @@ void* _lantern_fft_ifft2_tensor_intarrayref_intarrayref_cstringview(void* self,
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_ifft2(
-    from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21635,7 +22536,7 @@ void* _lantern_fft_ifft2_out_tensor_tensor_intarrayref_intarrayref_cstringview(v
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_ifft2_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21643,7 +22544,7 @@ void* _lantern_fft_rfft2_tensor_intarrayref_intarrayref_cstringview(void* self,
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_rfft2(
-    from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21651,7 +22552,7 @@ void* _lantern_fft_rfft2_out_tensor_tensor_intarrayref_intarrayref_cstringview(v
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_rfft2_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21659,7 +22560,7 @@ void* _lantern_fft_irfft2_tensor_intarrayref_intarrayref_cstringview(void* self,
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_irfft2(
-    from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21667,7 +22568,7 @@ void* _lantern_fft_irfft2_out_tensor_tensor_intarrayref_intarrayref_cstringview(
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_irfft2_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21675,7 +22576,7 @@ void* _lantern_fft_hfft2_tensor_intarrayref_intarrayref_cstringview(void* self,
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_hfft2(
-    from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21683,7 +22584,7 @@ void* _lantern_fft_hfft2_out_tensor_tensor_intarrayref_intarrayref_cstringview(v
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_hfft2_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21691,7 +22592,7 @@ void* _lantern_fft_ihfft2_tensor_intarrayref_intarrayref_cstringview(void* self,
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_ihfft2(
-    from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21699,7 +22600,7 @@ void* _lantern_fft_ihfft2_out_tensor_tensor_intarrayref_intarrayref_cstringview(
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_ihfft2_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21707,7 +22608,7 @@ void* _lantern_fft_fftn_tensor_intarrayref_intarrayref_cstringview(void* self, v
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_fftn(
-    from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::optional::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21715,7 +22616,7 @@ void* _lantern_fft_fftn_out_tensor_tensor_intarrayref_intarrayref_cstringview(vo
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_fftn_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::optional::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21723,7 +22624,7 @@ void* _lantern_fft_ifftn_tensor_intarrayref_intarrayref_cstringview(void* self,
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_ifftn(
-    from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::optional::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21731,7 +22632,7 @@ void* _lantern_fft_ifftn_out_tensor_tensor_intarrayref_intarrayref_cstringview(v
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_ifftn_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::optional::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21739,7 +22640,7 @@ void* _lantern_fft_rfftn_tensor_intarrayref_intarrayref_cstringview(void* self,
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_rfftn(
-    from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::optional::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21747,7 +22648,7 @@ void* _lantern_fft_rfftn_out_tensor_tensor_intarrayref_intarrayref_cstringview(v
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_rfftn_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::optional::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21755,7 +22656,7 @@ void* _lantern_fft_irfftn_tensor_intarrayref_intarrayref_cstringview(void* self,
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_irfftn(
-    from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::optional::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21763,7 +22664,7 @@ void* _lantern_fft_irfftn_out_tensor_tensor_intarrayref_intarrayref_cstringview(
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_irfftn_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::optional::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21771,7 +22672,7 @@ void* _lantern_fft_hfftn_tensor_intarrayref_intarrayref_cstringview(void* self,
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_hfftn(
-    from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::optional::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21779,7 +22680,7 @@ void* _lantern_fft_hfftn_out_tensor_tensor_intarrayref_intarrayref_cstringview(v
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_hfftn_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::optional::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21787,7 +22688,7 @@ void* _lantern_fft_ihfftn_tensor_intarrayref_intarrayref_cstringview(void* self,
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_ihfftn(
-    from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::optional::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21795,7 +22696,7 @@ void* _lantern_fft_ihfftn_out_tensor_tensor_intarrayref_intarrayref_cstringview(
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_ihfftn_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(s), from_raw::IntArrayRef(dim), from_raw::optional::string_view(norm)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::IntArrayRef(s), from_raw::optional::IntArrayRef(dim), from_raw::optional::string_view(norm)));
   LANTERN_FUNCTION_END
 }
 
@@ -21835,7 +22736,7 @@ void* _lantern_fft_fftshift_tensor_intarrayref(void* self, void* dim)
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_fftshift(
-    from_raw::Tensor(self), from_raw::IntArrayRef(dim)));
+    from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim)));
   LANTERN_FUNCTION_END
 }
 
@@ -21843,7 +22744,7 @@ void* _lantern_fft_ifftshift_tensor_intarrayref(void* self, void* dim)
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::fft_ifftshift(
-    from_raw::Tensor(self), from_raw::IntArrayRef(dim)));
+    from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim)));
   LANTERN_FUNCTION_END
 }
 
@@ -22199,6 +23100,14 @@ void* _lantern_linalg_eig_out_tensor_tensor_tensor(void* eigenvalues, void* eige
   LANTERN_FUNCTION_END
 }
 
+void* _lantern__linalg_eigvals_tensor(void* self)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::Tensor(torch::_linalg_eigvals(
+    from_raw::Tensor(self)));
+  LANTERN_FUNCTION_END
+}
+
 void* _lantern_linalg_eigvals_tensor(void* self)
 {
   LANTERN_FUNCTION_START
@@ -22411,7 +23320,7 @@ void* _lantern_linalg_norm_tensor_scalar_intarrayref_bool_scalartype(void* self,
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::linalg_norm(
-    from_raw::Tensor(self), from_raw::optional::Scalar(ord), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype)));
+    from_raw::Tensor(self), from_raw::optional::Scalar(ord), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype)));
   LANTERN_FUNCTION_END
 }
 
@@ -22419,7 +23328,7 @@ void* _lantern_linalg_norm_tensor_cstringview_intarrayref_bool_scalartype(void*
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::linalg_norm(
-    from_raw::Tensor(self), from_raw::string_view(ord), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype)));
+    from_raw::Tensor(self), from_raw::string_view(ord), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype)));
   LANTERN_FUNCTION_END
 }
 
@@ -22427,7 +23336,7 @@ void* _lantern_linalg_norm_out_tensor_tensor_scalar_intarrayref_bool_scalartype(
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::linalg_norm_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::Scalar(ord), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::Scalar(ord), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype)));
   LANTERN_FUNCTION_END
 }
 
@@ -22435,7 +23344,7 @@ void* _lantern_linalg_norm_out_tensor_tensor_cstringview_intarrayref_bool_scalar
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::linalg_norm_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::string_view(ord), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::string_view(ord), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype)));
   LANTERN_FUNCTION_END
 }
 
@@ -22443,7 +23352,7 @@ void* _lantern_linalg_vector_norm_tensor_scalar_intarrayref_bool_scalartype(void
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::linalg_vector_norm(
-    from_raw::Tensor(self), from_raw::Scalar(ord), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype)));
+    from_raw::Tensor(self), from_raw::Scalar(ord), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype)));
   LANTERN_FUNCTION_END
 }
 
@@ -22451,7 +23360,7 @@ void* _lantern_linalg_vector_norm_out_tensor_tensor_scalar_intarrayref_bool_scal
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::linalg_vector_norm_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Scalar(ord), from_raw::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Scalar(ord), from_raw::optional::IntArrayRef(dim), from_raw::bool_t(keepdim), from_raw::optional::ScalarType(dtype)));
   LANTERN_FUNCTION_END
 }
 
@@ -22671,6 +23580,14 @@ void* _lantern_linalg_solve_tensor_tensor_bool(void* A, void* B, void* left)
   LANTERN_FUNCTION_END
 }
 
+void* _lantern__spsolve_tensor_tensor_bool(void* A, void* B, void* left)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::Tensor(torch::_spsolve(
+    from_raw::Tensor(A), from_raw::Tensor(B), from_raw::bool_t(left)));
+  LANTERN_FUNCTION_END
+}
+
 void* _lantern_linalg_solve_out_tensor_tensor_tensor_bool(void* out, void* A, void* B, void* left)
 {
   LANTERN_FUNCTION_START
@@ -22699,7 +23616,7 @@ void* _lantern_linalg_tensorsolve_tensor_tensor_intarrayref(void* self, void* ot
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::linalg_tensorsolve(
-    from_raw::Tensor(self), from_raw::Tensor(other), from_raw::IntArrayRef(dims)));
+    from_raw::Tensor(self), from_raw::Tensor(other), from_raw::optional::IntArrayRef(dims)));
   LANTERN_FUNCTION_END
 }
 
@@ -22707,7 +23624,7 @@ void* _lantern_linalg_tensorsolve_out_tensor_tensor_tensor_intarrayref(void* out
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::linalg_tensorsolve_out(
-    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Tensor(other), from_raw::IntArrayRef(dims)));
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Tensor(other),
+    from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Tensor(other), from_raw::optional::IntArrayRef(dims)));
   LANTERN_FUNCTION_END
 }
 
@@ -22827,7 +23744,7 @@ void* _lantern_nested_to_padded_tensor_tensor_double_intarrayref(void* self, voi
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::nested_to_padded_tensor(
-    from_raw::Tensor(self), from_raw::double_t(padding), from_raw::IntArrayRef(output_size)));
+    from_raw::Tensor(self), from_raw::double_t(padding), from_raw::optional::IntArrayRef(output_size)));
   LANTERN_FUNCTION_END
 }
 
@@ -22839,11 +23756,19 @@ void* _lantern__test_serialization_subcmul_tensor_tensor_scalar(void* self, void
   LANTERN_FUNCTION_END
 }
 
+void* _lantern__test_parallel_materialize_tensor_intt_bool(void* self, void* num_parallel, void* skip_first)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::Tensor(torch::_test_parallel_materialize(
+    from_raw::Tensor(self), from_raw::int64_t(num_parallel), from_raw::bool_t(skip_first)));
+  LANTERN_FUNCTION_END
+}
+
 void* _lantern__test_optional_intlist_tensor_intarrayref(void* values, void* addends)
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::_test_optional_intlist(
-    from_raw::Tensor(values), from_raw::IntArrayRef(addends)));
+    from_raw::Tensor(values), from_raw::optional::IntArrayRef(addends)));
   LANTERN_FUNCTION_END
 }
 
@@ -22851,7 +23776,7 @@ void* _lantern__test_optional_filled_intlist_tensor_intarrayref(void* values, vo
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::_test_optional_filled_intlist(
-    from_raw::Tensor(values), from_raw::IntArrayRef(addends)));
+    from_raw::Tensor(values), from_raw::optional::IntArrayRef(addends)));
   LANTERN_FUNCTION_END
 }
 
@@ -22943,11 +23868,11 @@ void* _lantern__segment_reduce_backward_tensor_tensor_tensor_cstringview_tensor_
   LANTERN_FUNCTION_END
 }
 
-void* _lantern_pad_sequence_tensorlist_bool_double(void* sequences, void* batch_first, void* padding_value)
+void* _lantern_pad_sequence_tensorlist_bool_double_cstringview(void* sequences, void* batch_first, void* padding_value, void* padding_side)
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::pad_sequence(
-    from_raw::TensorList(sequences), from_raw::bool_t(batch_first), from_raw::double_t(padding_value)));
+    from_raw::TensorList(sequences), from_raw::bool_t(batch_first), from_raw::double_t(padding_value), from_raw::string_view(padding_side)));
   LANTERN_FUNCTION_END
 }
 
@@ -23291,7 +24216,23 @@ void* _lantern_Tensor_to_padded_tensor_tensor_double_intarrayref(void* self, voi
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(from_raw::Tensor(self).to_padded_tensor(
-    from_raw::double_t(padding), from_raw::IntArrayRef(output_size)));
+    from_raw::double_t(padding), from_raw::optional::IntArrayRef(output_size)));
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__jagged_to_padded_dense_forward_tensor_tensorlist_intarrayref_double(void* values, void* offsets, void* max_lengths, void* padding_value)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::Tensor(torch::_jagged_to_padded_dense_forward(
+    from_raw::Tensor(values), from_raw::TensorList(offsets), from_raw::IntArrayRef(max_lengths), from_raw::double_t(padding_value)));
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__padded_dense_to_jagged_forward_tensor_tensorlist_intt(void* dense, void* offsets, void* total_L)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::Tensor(torch::_padded_dense_to_jagged_forward(
+    from_raw::Tensor(dense), from_raw::TensorList(offsets), from_raw::optional::int64_t(total_L)));
   LANTERN_FUNCTION_END
 }
 
@@ -23303,6 +24244,14 @@ void* _lantern__nested_tensor_softmax_with_shape_tensor_tensor(void* self, void*
   LANTERN_FUNCTION_END
 }
 
+void* _lantern__safe_softmax_tensor_intt_scalartype(void* self, void* dim, void* dtype)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::Tensor(torch::_safe_softmax(
+    from_raw::Tensor(self), from_raw::int64_t(dim), from_raw::optional::ScalarType(dtype)));
+  LANTERN_FUNCTION_END
+}
+
 void* _lantern__transformer_encoder_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt(void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* mask_type)
 {
   LANTERN_FUNCTION_START
@@ -23319,107 +24268,147 @@ void* _lantern__native_multi_head_attention_tensor_tensor_tensor_intt_intt_tenso
   LANTERN_FUNCTION_END
 }
 
-void* _lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal)
+void* _lantern_scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* scale, void* enable_gqa)
 {
   LANTERN_FUNCTION_START
   return make_raw::Tensor(torch::scaled_dot_product_attention(
-    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::optional::Tensor(attn_mask), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal)));
+    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::optional::Tensor(attn_mask), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::optional::double_t(scale), from_raw::bool_t(enable_gqa)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__scaled_dot_product_attention_tensor_tensor_tensor_tensor_double_bool_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* need_attn_weights, void* is_causal)
+void* _lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* scale, void* enable_gqa)
 {
   LANTERN_FUNCTION_START
-  return make_raw::tuple(torch::_scaled_dot_product_attention(
-    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::optional::Tensor(attn_mask), from_raw::double_t(dropout_p), from_raw::bool_t(need_attn_weights), from_raw::bool_t(is_causal)));
+  return make_raw::int64_t(torch::_fused_sdp_choice(
+    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::optional::Tensor(attn_mask), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::optional::double_t(scale), from_raw::bool_t(enable_gqa)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__fused_sdp_choice_tensor_tensor_tensor_tensor_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal)
+void* _lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor_double_bool(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask, void* scale, void* enable_gqa)
 {
   LANTERN_FUNCTION_START
-  return make_raw::int64_t(torch::_fused_sdp_choice(
-    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::optional::Tensor(attn_mask), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal)));
+  return make_raw::tuple(torch::_scaled_dot_product_attention_math(
+    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::optional::Tensor(attn_mask), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::optional::Tensor(dropout_mask), from_raw::optional::double_t(scale), from_raw::bool_t(enable_gqa)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__scaled_dot_product_attention_math_tensor_tensor_tensor_tensor_double_bool_tensor(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask)
+void* _lantern__scaled_dot_product_attention_math_for_mps_tensor_tensor_tensor_tensor_double_bool_tensor_double(void* query, void* key, void* value, void* attn_mask, void* dropout_p, void* is_causal, void* dropout_mask, void* scale)
 {
   LANTERN_FUNCTION_START
-  return make_raw::tuple(torch::_scaled_dot_product_attention_math(
-    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::optional::Tensor(attn_mask), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::optional::Tensor(dropout_mask)));
+  return make_raw::tuple(torch::_scaled_dot_product_attention_math_for_mps(
+    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::optional::Tensor(attn_mask), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::optional::Tensor(dropout_mask), from_raw::optional::double_t(scale)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool(void* query, void* key, void* value, void* dropout_p, void* is_causal, void* return_debug_mask)
+void* _lantern__scaled_dot_product_flash_attention_tensor_tensor_tensor_double_bool_bool_double(void* query, void* key, void* value, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale)
 {
   LANTERN_FUNCTION_START
   return make_raw::tuple(torch::_scaled_dot_product_flash_attention(
-    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::bool_t(return_debug_mask)));
+    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::bool_t(return_debug_mask), from_raw::optional::double_t(scale)));
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__scaled_dot_product_flash_attention_for_cpu_tensor_tensor_tensor_double_bool_tensor_double(void* query, void* key, void* value, void* dropout_p, void* is_causal, void* attn_mask, void* scale)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::tuple(torch::_scaled_dot_product_flash_attention_for_cpu(
+    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::optional::Tensor(attn_mask), from_raw::optional::double_t(scale)));
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__scaled_dot_product_fused_attention_overrideable_tensor_tensor_tensor_tensor_double_bool_bool_double(void* query, void* key, void* value, void* attn_bias, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::tuple(torch::_scaled_dot_product_fused_attention_overrideable(
+    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::optional::Tensor(attn_bias), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::bool_t(return_debug_mask), from_raw::optional::double_t(scale)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset)
+void* _lantern__scaled_dot_product_flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale)
 {
   LANTERN_FUNCTION_START
   return make_raw::tuple(torch::_scaled_dot_product_flash_attention_backward(
-    from_raw::Tensor(grad_out), from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::Tensor(out), from_raw::Tensor(logsumexp), from_raw::Tensor(cum_seq_q), from_raw::Tensor(cum_seq_k), from_raw::int64_t(max_q), from_raw::int64_t(max_k), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::int64_t(philox_seed), from_raw::int64_t(philox_offset)));
+    from_raw::Tensor(grad_out), from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::Tensor(out), from_raw::Tensor(logsumexp), from_raw::Tensor(cum_seq_q), from_raw::Tensor(cum_seq_k), from_raw::int64_t(max_q), from_raw::int64_t(max_k), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::Tensor(philox_seed), from_raw::Tensor(philox_offset), from_raw::optional::double_t(scale)));
   LANTERN_FUNCTION_END
 }
 
+void* _lantern__scaled_dot_product_flash_attention_for_cpu_backward_tensor_tensor_tensor_tensor_tensor_tensor_double_bool_tensor_double(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* dropout_p, void* is_causal, void* attn_mask, void* scale)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::tuple(torch::_scaled_dot_product_flash_attention_for_cpu_backward(
+    from_raw::Tensor(grad_out), from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::Tensor(out), from_raw::Tensor(logsumexp), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::optional::Tensor(attn_mask), from_raw::optional::double_t(scale)));
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__scaled_dot_product_fused_attention_overrideable_backward_tensor_tensor_tensor_tensor_tensor_stdarraybool_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double(void* grad_out, void* query, void* key, void* value, void* attn_bias, void* grad_input_mask, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::tuple(torch::_scaled_dot_product_fused_attention_overrideable_backward(
+    from_raw::Tensor(grad_out), from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::Tensor(attn_bias), from_raw::vector::bool_t(grad_input_mask), from_raw::Tensor(out), from_raw::Tensor(logsumexp), from_raw::Tensor(cum_seq_q), from_raw::Tensor(cum_seq_k), from_raw::int64_t(max_q), from_raw::int64_t(max_k), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::Tensor(philox_seed), from_raw::Tensor(philox_offset), from_raw::optional::double_t(scale)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_bool_bool(void* query, void* key, void* value, void* compute_log_sumexp, void* is_causal)
+void* _lantern__scaled_dot_product_efficient_attention_tensor_tensor_tensor_tensor_bool_double_bool_double(void* query, void* key, void* value, void* attn_bias, void* compute_log_sumexp, void* dropout_p, void* is_causal, void* scale)
 {
   LANTERN_FUNCTION_START
   return make_raw::tuple(torch::_scaled_dot_product_efficient_attention(
-    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::bool_t(compute_log_sumexp), from_raw::bool_t(is_causal)));
+    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::optional::Tensor(attn_bias), from_raw::bool_t(compute_log_sumexp), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::optional::double_t(scale)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(void* grad_out_, void* query, void* key, void* value, void* out, void* logsumexp, void* is_causal, void* chunk_grad_outputs)
+void* _lantern__scaled_dot_product_efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_stdarraybool_bool_double(void* grad_out_, void* query, void* key, void* value, void* attn_bias, void* out, void* logsumexp, void* philox_seed, void* philox_offset, void* dropout_p, void* grad_input_mask, void* is_causal, void* scale)
 {
   LANTERN_FUNCTION_START
   return make_raw::tuple(torch::_scaled_dot_product_efficient_attention_backward(
-    from_raw::Tensor(grad_out_), from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::Tensor(out), from_raw::Tensor(logsumexp), from_raw::bool_t(is_causal), from_raw::bool_t(chunk_grad_outputs)));
+    from_raw::Tensor(grad_out_), from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::Tensor(attn_bias), from_raw::Tensor(out), from_raw::Tensor(logsumexp), from_raw::Tensor(philox_seed), from_raw::Tensor(philox_offset), from_raw::double_t(dropout_p), from_raw::vector::bool_t(grad_input_mask), from_raw::bool_t(is_causal), from_raw::optional::double_t(scale)));
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__scaled_dot_product_cudnn_attention_tensor_tensor_tensor_tensor_bool_double_bool_bool_double(void* query, void* key, void* value, void* attn_bias, void* compute_log_sumexp, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::tuple(torch::_scaled_dot_product_cudnn_attention(
+    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::optional::Tensor(attn_bias), from_raw::bool_t(compute_log_sumexp), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::bool_t(return_debug_mask), from_raw::optional::double_t(scale)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__chunk_grad_outputs_efficient_attention_tensor_tensor_tensor_bool(void* query, void* key, void* value, void* is_causal)
+void* _lantern__scaled_dot_product_cudnn_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_double(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* philox_seed, void* philox_offset, void* attn_bias, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* scale)
 {
   LANTERN_FUNCTION_START
-  return make_raw::bool_t(torch::_chunk_grad_outputs_efficient_attention(
-    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::bool_t(is_causal)));
+  return make_raw::tuple(torch::_scaled_dot_product_cudnn_attention_backward(
+    from_raw::Tensor(grad_out), from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::Tensor(out), from_raw::Tensor(logsumexp), from_raw::Tensor(philox_seed), from_raw::Tensor(philox_offset), from_raw::Tensor(attn_bias), from_raw::Tensor(cum_seq_q), from_raw::Tensor(cum_seq_k), from_raw::int64_t(max_q), from_raw::int64_t(max_k), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::optional::double_t(scale)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool(void* query, void* key, void* value, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* return_debug_mask)
+void* _lantern__flash_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_bool_double_intt_intt_tensor_tensor(void* query, void* key, void* value, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* return_debug_mask, void* scale, void* window_size_left, void* window_size_right, void* seqused_k, void* alibi_slopes)
 {
   LANTERN_FUNCTION_START
   return make_raw::tuple(torch::_flash_attention_forward(
-    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::Tensor(cum_seq_q), from_raw::Tensor(cum_seq_k), from_raw::int64_t(max_q), from_raw::int64_t(max_k), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::bool_t(return_debug_mask)));
+    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::optional::Tensor(cum_seq_q), from_raw::optional::Tensor(cum_seq_k), from_raw::int64_t(max_q), from_raw::int64_t(max_k), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::bool_t(return_debug_mask), from_raw::optional::double_t(scale), from_raw::optional::int64_t(window_size_left), from_raw::optional::int64_t(window_size_right), from_raw::optional::Tensor(seqused_k), from_raw::optional::Tensor(alibi_slopes)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_intt_intt(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset)
+void* _lantern__flash_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_bool_tensor_tensor_double_intt_intt(void* grad_out, void* query, void* key, void* value, void* out, void* logsumexp, void* cum_seq_q, void* cum_seq_k, void* max_q, void* max_k, void* dropout_p, void* is_causal, void* philox_seed, void* philox_offset, void* scale, void* window_size_left, void* window_size_right)
 {
   LANTERN_FUNCTION_START
   return make_raw::tuple(torch::_flash_attention_backward(
-    from_raw::Tensor(grad_out), from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::Tensor(out), from_raw::Tensor(logsumexp), from_raw::Tensor(cum_seq_q), from_raw::Tensor(cum_seq_k), from_raw::int64_t(max_q), from_raw::int64_t(max_k), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::int64_t(philox_seed), from_raw::int64_t(philox_offset)));
+    from_raw::Tensor(grad_out), from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::Tensor(out), from_raw::Tensor(logsumexp), from_raw::Tensor(cum_seq_q), from_raw::Tensor(cum_seq_k), from_raw::int64_t(max_q), from_raw::int64_t(max_k), from_raw::double_t(dropout_p), from_raw::bool_t(is_causal), from_raw::Tensor(philox_seed), from_raw::Tensor(philox_offset), from_raw::optional::double_t(scale), from_raw::optional::int64_t(window_size_left), from_raw::optional::int64_t(window_size_right)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_intt_bool_bool(void* query, void* key, void* value, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* compute_log_sumexp, void* causal)
+void* _lantern__efficient_attention_forward_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_double_intt_bool_double_tensor_intt(void* query, void* key, void* value, void* bias, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* max_seqlen_k, void* dropout_p, void* custom_mask_type, void* compute_log_sumexp, void* scale, void* seqlen_k, void* window_size)
 {
   LANTERN_FUNCTION_START
   return make_raw::tuple(torch::_efficient_attention_forward(
-    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::optional::Tensor(cu_seqlens_q), from_raw::optional::Tensor(cu_seqlens_k), from_raw::optional::int64_t(max_seqlen_q), from_raw::bool_t(compute_log_sumexp), from_raw::bool_t(causal)));
+    from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::optional::Tensor(bias), from_raw::optional::Tensor(cu_seqlens_q), from_raw::optional::Tensor(cu_seqlens_k), from_raw::optional::int64_t(max_seqlen_q), from_raw::optional::int64_t(max_seqlen_k), from_raw::double_t(dropout_p), from_raw::int64_t(custom_mask_type), from_raw::bool_t(compute_log_sumexp), from_raw::optional::double_t(scale), from_raw::optional::Tensor(seqlen_k), from_raw::optional::int64_t(window_size)));
   LANTERN_FUNCTION_END
 }
 
-void* _lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(void* grad_out_, void* query, void* key, void* value, void* out, void* logsumexp, void* is_causal, void* chunk_grad_outputs)
+void* _lantern__efficient_attention_backward_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_double_tensor_tensor_intt_bool_double_intt_intt_bool(void* grad_out_, void* query, void* key, void* value, void* bias, void* out, void* cu_seqlens_q, void* cu_seqlens_k, void* max_seqlen_q, void* max_seqlen_k, void* logsumexp, void* dropout_p, void* philox_seed, void* philox_offset, void* custom_mask_type, void* bias_requires_grad, void* scale, void* num_splits_key, void* window_size, void* shared_storage_dqdkdv)
 {
   LANTERN_FUNCTION_START
   return make_raw::tuple(torch::_efficient_attention_backward(
-    from_raw::Tensor(grad_out_), from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::Tensor(out), from_raw::Tensor(logsumexp), from_raw::bool_t(is_causal), from_raw::bool_t(chunk_grad_outputs)));
from_raw::Tensor(philox_offset), from_raw::int64_t(custom_mask_type), from_raw::bool_t(bias_requires_grad), from_raw::optional::double_t(scale), from_raw::optional::int64_t(num_splits_key), from_raw::optional::int64_t(window_size), from_raw::bool_t(shared_storage_dqdkdv))); LANTERN_FUNCTION_END } @@ -23431,6 +24420,14 @@ void* _lantern__triton_scaled_dot_attention_tensor_tensor_tensor_double(void* q, LANTERN_FUNCTION_END } +void* _lantern__fill_mem_eff_dropout_mask__tensor_double_intt_intt(void* self, void* dropout_p, void* seed, void* offset) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_fill_mem_eff_dropout_mask_( + from_raw::Tensor(self), from_raw::double_t(dropout_p), from_raw::int64_t(seed), from_raw::int64_t(offset))); + LANTERN_FUNCTION_END +} + void* _lantern__triton_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor(void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask) { LANTERN_FUNCTION_START @@ -23455,22 +24452,6 @@ void* _lantern_special_airy_ai_out_tensor_tensor(void* out, void* x) LANTERN_FUNCTION_END } -void* _lantern__transformer_decoder_only_layer_fwd_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* incr_key, void* incr_value) -{ - LANTERN_FUNCTION_START - return make_raw::tuple(torch::_transformer_decoder_only_layer_fwd( - from_raw::Tensor(src), from_raw::int64_t(embed_dim), from_raw::int64_t(num_heads), from_raw::Tensor(qkv_weight), from_raw::Tensor(qkv_bias), from_raw::Tensor(proj_weight), from_raw::Tensor(proj_bias), from_raw::bool_t(use_gelu), from_raw::bool_t(norm_first), from_raw::double_t(eps), from_raw::Tensor(norm_weight_1), from_raw::Tensor(norm_bias_1), from_raw::Tensor(norm_weight_2), from_raw::Tensor(norm_bias_2), from_raw::Tensor(ffn_weight_1), from_raw::Tensor(ffn_bias_1), from_raw::Tensor(ffn_weight_2), from_raw::Tensor(ffn_bias_2), from_raw::optional::Tensor(mask), from_raw::optional::Tensor(incr_key), from_raw::optional::Tensor(incr_value))); - LANTERN_FUNCTION_END -} - -void* _lantern__native_decoder_only_multi_head_attention_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask, void* incr_key, void* incr_value, void* need_weights, void* average_attn_weights) -{ - LANTERN_FUNCTION_START - return make_raw::tuple(torch::_native_decoder_only_multi_head_attention( - from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::int64_t(embed_dim), from_raw::int64_t(num_head), from_raw::Tensor(qkv_weight), from_raw::Tensor(qkv_bias), from_raw::Tensor(proj_weight), from_raw::Tensor(proj_bias), from_raw::optional::Tensor(mask), from_raw::optional::Tensor(incr_key), from_raw::optional::Tensor(incr_value), from_raw::bool_t(need_weights), from_raw::bool_t(average_attn_weights))); - LANTERN_FUNCTION_END -} - void* _lantern_special_bessel_j0_tensor(void* self) { LANTERN_FUNCTION_START @@ -24239,6 
+25220,14 @@ void* _lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorli LANTERN_FUNCTION_END } +void* _lantern__fused_adam__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) +{ + LANTERN_FUNCTION_START + torch::_fused_adam_(from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(exp_avgs), from_raw::TensorList(exp_avg_sqs), from_raw::TensorList(max_exp_avg_sqs), from_raw::TensorList(state_steps), from_raw::Tensor(lr), from_raw::double_t(beta1), from_raw::double_t(beta2), from_raw::double_t(weight_decay), from_raw::double_t(eps), from_raw::bool_t(amsgrad), from_raw::bool_t(maximize), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf)); + return NULL; + LANTERN_FUNCTION_END +} + void* _lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_FUNCTION_START @@ -24247,6 +25236,46 @@ void* _lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorl LANTERN_FUNCTION_END } +void* _lantern__fused_adamw__tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) +{ + LANTERN_FUNCTION_START + torch::_fused_adamw_(from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(exp_avgs), from_raw::TensorList(exp_avg_sqs), from_raw::TensorList(max_exp_avg_sqs), from_raw::TensorList(state_steps), from_raw::Tensor(lr), from_raw::double_t(beta1), from_raw::double_t(beta2), from_raw::double_t(weight_decay), from_raw::double_t(eps), from_raw::bool_t(amsgrad), from_raw::bool_t(maximize), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) +{ + LANTERN_FUNCTION_START + torch::_fused_sgd_(from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(momentum_buffer_list), from_raw::double_t(weight_decay), from_raw::double_t(momentum), from_raw::double_t(lr), from_raw::double_t(dampening), from_raw::bool_t(nesterov), from_raw::bool_t(maximize), from_raw::bool_t(is_first_step), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__fused_sgd__tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(void* self, void* grads, void* momentum_buffer_list, void* 
weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) +{ + LANTERN_FUNCTION_START + torch::_fused_sgd_(from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(momentum_buffer_list), from_raw::double_t(weight_decay), from_raw::double_t(momentum), from_raw::Tensor(lr), from_raw::double_t(dampening), from_raw::bool_t(nesterov), from_raw::bool_t(maximize), from_raw::bool_t(is_first_step), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__fused_adagrad__tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(void* self, void* grads, void* state_sums, void* state_steps, void* lr, void* lr_decay, void* weight_decay, void* eps, void* maximize, void* grad_scale, void* found_inf) +{ + LANTERN_FUNCTION_START + torch::_fused_adagrad_(from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(state_sums), from_raw::TensorList(state_steps), from_raw::double_t(lr), from_raw::double_t(lr_decay), from_raw::double_t(weight_decay), from_raw::double_t(eps), from_raw::bool_t(maximize), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__propagate_xla_data_tensor_tensor(void* input, void* output) +{ + LANTERN_FUNCTION_START + torch::_propagate_xla_data(from_raw::Tensor(input), from_raw::Tensor(output)); + return NULL; + LANTERN_FUNCTION_END +} + void* _lantern__new_zeros_with_same_feature_meta_out_tensor_tensor_tensor_intt(void* out, void* self, void* other, void* self_num_batch_dims) { LANTERN_FUNCTION_START @@ -24359,6 +25388,14 @@ void* _lantern_affine_grid_generator_out_tensor_tensor_intarrayref_bool(void* ou LANTERN_FUNCTION_END } +void* _lantern__test_functorch_fallback_out_tensor_tensor_tensor(void* out, void* self, void* other) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_test_functorch_fallback_out( + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Tensor(other))); + LANTERN_FUNCTION_END +} + void* _lantern_bartlett_window_out_tensor_intt(void* out, void* window_length) { LANTERN_FUNCTION_START @@ -24467,7 +25504,7 @@ void* _lantern_convolution_backward_out_tensor_tensor_tensor_tensor_tensor_tenso { LANTERN_FUNCTION_START return make_raw::tuple(torch::convolution_backward_out( - from_raw::Tensor(out0), from_raw::Tensor(out1), from_raw::Tensor(out2), from_raw::Tensor(grad_output), from_raw::Tensor(input), from_raw::Tensor(weight), from_raw::IntArrayRef(bias_sizes), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(dilation), from_raw::bool_t(transposed), from_raw::IntArrayRef(output_padding), from_raw::int64_t(groups), from_raw::vector::bool_t(output_mask))); + from_raw::Tensor(out0), from_raw::Tensor(out1), from_raw::Tensor(out2), from_raw::Tensor(grad_output), from_raw::Tensor(input), from_raw::Tensor(weight), from_raw::optional::IntArrayRef(bias_sizes), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(dilation), from_raw::bool_t(transposed), from_raw::IntArrayRef(output_padding), from_raw::int64_t(groups), from_raw::vector::bool_t(output_mask))); LANTERN_FUNCTION_END } @@ -24575,14 +25612,6 @@ void* _lantern_cudnn_batch_norm_backward_out_tensor_tensor_tensor_tensor_tensor_ LANTERN_FUNCTION_END } -void* 
_lantern_cudnn_convolution_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(void* out, void* self, void* weight, void* padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32) -{ - LANTERN_FUNCTION_START - return make_raw::Tensor(torch::cudnn_convolution_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Tensor(weight), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(dilation), from_raw::int64_t(groups), from_raw::bool_t(benchmark), from_raw::bool_t(deterministic), from_raw::bool_t(allow_tf32))); - LANTERN_FUNCTION_END -} - void* _lantern_cudnn_convolution_transpose_out_tensor_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_intt_bool_bool_bool(void* out, void* self, void* weight, void* padding, void* output_padding, void* stride, void* dilation, void* groups, void* benchmark, void* deterministic, void* allow_tf32) { LANTERN_FUNCTION_START @@ -24767,6 +25796,14 @@ void* _lantern_empty_out_tensor_intarrayref_dimnamelist_memoryformat(void* out, LANTERN_FUNCTION_END } +void* _lantern_empty_permuted_out_tensor_intarrayref_intarrayref(void* out, void* size, void* physical_layout) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::empty_permuted_out( + from_raw::Tensor(out), from_raw::IntArrayRef(size), from_raw::IntArrayRef(physical_layout))); + LANTERN_FUNCTION_END +} + void* _lantern_new_empty_out_tensor_tensor_intarrayref(void* out, void* self, void* size) { LANTERN_FUNCTION_START @@ -24895,6 +25932,14 @@ void* _lantern_fill_out_tensor_tensor_tensor(void* out, void* self, void* value) LANTERN_FUNCTION_END } +void* _lantern_floor_divide_out_tensor_tensor_scalar(void* out, void* self, void* other) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::floor_divide_out( + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Scalar(other))); + LANTERN_FUNCTION_END +} + void* _lantern_full_out_tensor_intarrayref_scalar_dimnamelist(void* out, void* size, void* fill_value, void* names) { LANTERN_FUNCTION_START @@ -25047,7 +26092,7 @@ void* _lantern_native_group_norm_backward_out_tensor_tensor_tensor_tensor_tensor LANTERN_FUNCTION_END } -void* _lantern_index_put_out_tensor_tensor_constclistcoptionaltensor_tensor_bool(void* out, void* self, void* indices, void* values, void* accumulate) +void* _lantern_index_put_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool(void* out, void* self, void* indices, void* values, void* accumulate) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::index_put_out( @@ -25055,7 +26100,7 @@ void* _lantern_index_put_out_tensor_tensor_constclistcoptionaltensor_tensor_bool LANTERN_FUNCTION_END } -void* _lantern__index_put_impl_out_tensor_tensor_constclistcoptionaltensor_tensor_bool_bool(void* out, void* self, void* indices, void* values, void* accumulate, void* unsafe) +void* _lantern__index_put_impl_out_tensor_tensor_constcliststdoptionaltensor_tensor_bool_bool(void* out, void* self, void* indices, void* values, void* accumulate, void* unsafe) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::_index_put_impl_out( @@ -25063,7 +26108,7 @@ void* _lantern__index_put_impl_out_tensor_tensor_constclistcoptionaltensor_tenso LANTERN_FUNCTION_END } -void* _lantern__index_put_impl_tensor_constclistcoptionaltensor_tensor_bool_bool(void* self, void* indices, void* values, void* accumulate, void* unsafe) +void* _lantern__index_put_impl_tensor_constcliststdoptionaltensor_tensor_bool_bool(void* self, void* 
indices, void* values, void* accumulate, void* unsafe) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::_index_put_impl( @@ -25215,6 +26260,14 @@ void* _lantern_quantized_max_pool2d_out_tensor_tensor_intarrayref_intarrayref_in LANTERN_FUNCTION_END } +void* _lantern_quantized_max_pool3d_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intarrayref_bool(void* out, void* self, void* kernel_size, void* stride, void* padding, void* dilation, void* ceil_mode) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::quantized_max_pool3d_out( + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(kernel_size), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(dilation), from_raw::bool_t(ceil_mode))); + LANTERN_FUNCTION_END +} + void* _lantern_median_out_tensor_tensor(void* out, void* self) { LANTERN_FUNCTION_START @@ -25351,6 +26404,14 @@ void* _lantern__native_batch_norm_legit_functional_tensor_tensor_tensor_tensor_t LANTERN_FUNCTION_END } +void* _lantern__native_batch_norm_legit_no_training_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(void* out0, void* out1, void* out2, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) +{ + LANTERN_FUNCTION_START + return make_raw::tuple(torch::_native_batch_norm_legit_no_training_out( + from_raw::Tensor(out0), from_raw::Tensor(out1), from_raw::Tensor(out2), from_raw::Tensor(input), from_raw::optional::Tensor(weight), from_raw::optional::Tensor(bias), from_raw::Tensor(running_mean), from_raw::Tensor(running_var), from_raw::double_t(momentum), from_raw::double_t(eps))); + LANTERN_FUNCTION_END +} + void* _lantern_batch_norm_stats_out_tensor_tensor_tensor_double(void* out0, void* out1, void* input, void* eps) { LANTERN_FUNCTION_START @@ -25391,11 +26452,11 @@ void* _lantern_batch_norm_backward_reduce_out_tensor_tensor_tensor_tensor_tensor LANTERN_FUNCTION_END } -void* _lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* out, void* grad_out, void* input, void* mean, void* invstd, void* weight, void* mean_dy, void* mean_dy_xmu, void* count) +void* _lantern_batch_norm_backward_elemt_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* out, void* grad_out, void* input, void* mean, void* invstd, void* weight, void* sum_dy, void* sum_dy_xmu, void* count) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::batch_norm_backward_elemt_out( - from_raw::Tensor(out), from_raw::Tensor(grad_out), from_raw::Tensor(input), from_raw::Tensor(mean), from_raw::Tensor(invstd), from_raw::optional::Tensor(weight), from_raw::Tensor(mean_dy), from_raw::Tensor(mean_dy_xmu), from_raw::Tensor(count))); + from_raw::Tensor(out), from_raw::Tensor(grad_out), from_raw::Tensor(input), from_raw::Tensor(mean), from_raw::Tensor(invstd), from_raw::optional::Tensor(weight), from_raw::Tensor(sum_dy), from_raw::Tensor(sum_dy_xmu), from_raw::Tensor(count))); LANTERN_FUNCTION_END } @@ -25687,11 +26748,11 @@ void* _lantern_sum_out_tensor_tensor_scalartype(void* out, void* self, void* dty LANTERN_FUNCTION_END } -void* _lantern_std_mean_out_tensor_tensor_tensor_intarrayref_intt_bool(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim) +void* _lantern_std_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::tuple(torch::std_mean_out( - 
from_raw::Tensor(out0), from_raw::Tensor(out1), from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::Tensor(out0), from_raw::Tensor(out1), from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -25775,6 +26836,14 @@ void* _lantern__nested_tensor_strides_out_tensor_tensor(void* out, void* self) LANTERN_FUNCTION_END } +void* _lantern__nested_tensor_storage_offsets_out_tensor_tensor(void* out, void* self) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_nested_tensor_storage_offsets_out( + from_raw::Tensor(out), from_raw::Tensor(self))); + LANTERN_FUNCTION_END +} + void* _lantern__nested_from_padded_and_nested_example_out_tensor_tensor_tensor(void* out, void* padded, void* nt_example) { LANTERN_FUNCTION_START @@ -25783,11 +26852,27 @@ void* _lantern__nested_from_padded_and_nested_example_out_tensor_tensor_tensor(v LANTERN_FUNCTION_END } -void* _lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_intarrayref(void* out, void* self, void* nested_size, void* nested_strides, void* offsets) +void* _lantern__nested_view_from_buffer_copy_out_tensor_tensor_tensor_tensor_tensor(void* out, void* self, void* nested_size, void* nested_strides, void* offsets) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::_nested_view_from_buffer_copy_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Tensor(nested_size), from_raw::Tensor(nested_strides), from_raw::IntArrayRef(offsets))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Tensor(nested_size), from_raw::Tensor(nested_strides), from_raw::Tensor(offsets))); + LANTERN_FUNCTION_END +} + +void* _lantern__nested_view_from_jagged_copy_out_tensor_tensor_tensor_tensor_tensor_intt_tensor_tensor(void* out, void* self, void* offsets, void* dummy, void* lengths, void* ragged_idx, void* min_seqlen, void* max_seqlen) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_nested_view_from_jagged_copy_out( + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Tensor(offsets), from_raw::Tensor(dummy), from_raw::optional::Tensor(lengths), from_raw::int64_t(ragged_idx), from_raw::optional::Tensor(min_seqlen), from_raw::optional::Tensor(max_seqlen))); + LANTERN_FUNCTION_END +} + +void* _lantern__nested_get_values_copy_out_tensor_tensor(void* out, void* self) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_nested_get_values_copy_out( + from_raw::Tensor(out), from_raw::Tensor(self))); LANTERN_FUNCTION_END } @@ -25847,11 +26932,11 @@ void* _lantern__unsafe_view_out_tensor_tensor_intarrayref(void* out, void* self, LANTERN_FUNCTION_END } -void* _lantern_var_mean_out_tensor_tensor_tensor_intarrayref_intt_bool(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim) +void* _lantern_var_mean_out_tensor_tensor_tensor_intarrayref_scalar_bool(void* out0, void* out1, void* self, void* dim, void* correction, void* keepdim) { LANTERN_FUNCTION_START return make_raw::tuple(torch::var_mean_out( - from_raw::Tensor(out0), from_raw::Tensor(out1), from_raw::Tensor(self), from_raw::IntArrayRef(dim), from_raw::optional::int64_t(correction), from_raw::bool_t(keepdim))); + from_raw::Tensor(out0), from_raw::Tensor(out1), from_raw::Tensor(self), from_raw::optional::IntArrayRef(dim), from_raw::optional::Scalar(correction), from_raw::bool_t(keepdim))); LANTERN_FUNCTION_END } @@ -25959,6 +27044,22 @@ void* 
_lantern_native_norm_out_tensor_tensor_scalar_intarrayref_bool_scalartype( LANTERN_FUNCTION_END } +void* _lantern__batch_norm_with_update_functional_tensor_tensor_tensor_tensor_tensor_double_double(void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) +{ + LANTERN_FUNCTION_START + return make_raw::tuple(torch::_batch_norm_with_update_functional( + from_raw::Tensor(input), from_raw::optional::Tensor(weight), from_raw::optional::Tensor(bias), from_raw::Tensor(running_mean), from_raw::Tensor(running_var), from_raw::double_t(momentum), from_raw::double_t(eps))); + LANTERN_FUNCTION_END +} + +void* _lantern__batch_norm_no_update_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_double_double(void* out0, void* out1, void* out2, void* out3, void* input, void* weight, void* bias, void* running_mean, void* running_var, void* momentum, void* eps) +{ + LANTERN_FUNCTION_START + return make_raw::tuple(torch::_batch_norm_no_update_out( + from_raw::Tensor(out0), from_raw::Tensor(out1), from_raw::Tensor(out2), from_raw::Tensor(out3), from_raw::Tensor(input), from_raw::optional::Tensor(weight), from_raw::optional::Tensor(bias), from_raw::optional::Tensor(running_mean), from_raw::optional::Tensor(running_var), from_raw::double_t(momentum), from_raw::double_t(eps))); + LANTERN_FUNCTION_END +} + void* _lantern__sparse_sum_out_tensor_tensor_intarrayref(void* out, void* self, void* dim) { LANTERN_FUNCTION_START @@ -26151,11 +27252,11 @@ void* _lantern__sparse_coo_tensor_with_dims_out_tensor_intt_intt_intarrayref(voi LANTERN_FUNCTION_END } -void* _lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor(void* out, void* sparse_dim, void* dense_dim, void* size, void* indices, void* values) +void* _lantern__sparse_coo_tensor_with_dims_and_tensors_out_tensor_intt_intt_intarrayref_tensor_tensor_bool(void* out, void* sparse_dim, void* dense_dim, void* size, void* indices, void* values, void* is_coalesced) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::_sparse_coo_tensor_with_dims_and_tensors_out( - from_raw::Tensor(out), from_raw::int64_t(sparse_dim), from_raw::int64_t(dense_dim), from_raw::IntArrayRef(size), from_raw::Tensor(indices), from_raw::Tensor(values))); + from_raw::Tensor(out), from_raw::int64_t(sparse_dim), from_raw::int64_t(dense_dim), from_raw::IntArrayRef(size), from_raw::Tensor(indices), from_raw::Tensor(values), from_raw::optional::bool_t(is_coalesced))); LANTERN_FUNCTION_END } @@ -26199,11 +27300,19 @@ void* _lantern_sparse_mask_out_tensor_tensor_tensor(void* out, void* self, void* LANTERN_FUNCTION_END } -void* _lantern__to_dense_out_tensor_tensor_scalartype(void* out, void* self, void* dtype) +void* _lantern__sparse_mask_projection_out_tensor_tensor_tensor_bool(void* out, void* self, void* mask, void* accumulate_matches) +{ + LANTERN_FUNCTION_START + return make_raw::Tensor(torch::_sparse_mask_projection_out( + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::Tensor(mask), from_raw::bool_t(accumulate_matches))); + LANTERN_FUNCTION_END +} + +void* _lantern__to_dense_out_tensor_tensor_scalartype_bool(void* out, void* self, void* dtype, void* masked_grad) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::_to_dense_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::ScalarType(dtype))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::ScalarType(dtype), from_raw::optional::bool_t(masked_grad))); LANTERN_FUNCTION_END } @@ -26247,50 
+27356,50 @@ void* _lantern_copy_sparse_to_sparse_tensor_tensor_bool(void* self, void* src, v LANTERN_FUNCTION_END } -void* _lantern_to_sparse_out_tensor_tensor_intt(void* out, void* self, void* sparse_dim) +void* _lantern__to_sparse_out_tensor_tensor_intt(void* out, void* self, void* sparse_dim) { LANTERN_FUNCTION_START - return make_raw::Tensor(torch::to_sparse_out( + return make_raw::Tensor(torch::_to_sparse_out( from_raw::Tensor(out), from_raw::Tensor(self), from_raw::int64_t(sparse_dim))); LANTERN_FUNCTION_END } -void* _lantern_to_sparse_out_tensor_tensor_layout_intarrayref_intt(void* out, void* self, void* layout, void* blocksize, void* dense_dim) +void* _lantern__to_sparse_out_tensor_tensor_layout_intarrayref_intt(void* out, void* self, void* layout, void* blocksize, void* dense_dim) { LANTERN_FUNCTION_START - return make_raw::Tensor(torch::to_sparse_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::Layout(layout), from_raw::IntArrayRef(blocksize), from_raw::optional::int64_t(dense_dim))); + return make_raw::Tensor(torch::_to_sparse_out( + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::Layout(layout), from_raw::optional::IntArrayRef(blocksize), from_raw::optional::int64_t(dense_dim))); LANTERN_FUNCTION_END } -void* _lantern_to_sparse_csr_out_tensor_tensor_intt(void* out, void* self, void* dense_dim) +void* _lantern__to_sparse_csr_out_tensor_tensor_intt(void* out, void* self, void* dense_dim) { LANTERN_FUNCTION_START - return make_raw::Tensor(torch::to_sparse_csr_out( + return make_raw::Tensor(torch::_to_sparse_csr_out( from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::int64_t(dense_dim))); LANTERN_FUNCTION_END } -void* _lantern_to_sparse_csc_out_tensor_tensor_intt(void* out, void* self, void* dense_dim) +void* _lantern__to_sparse_csc_out_tensor_tensor_intt(void* out, void* self, void* dense_dim) { LANTERN_FUNCTION_START - return make_raw::Tensor(torch::to_sparse_csc_out( + return make_raw::Tensor(torch::_to_sparse_csc_out( from_raw::Tensor(out), from_raw::Tensor(self), from_raw::optional::int64_t(dense_dim))); LANTERN_FUNCTION_END } -void* _lantern_to_sparse_bsr_out_tensor_tensor_intarrayref_intt(void* out, void* self, void* blocksize, void* dense_dim) +void* _lantern__to_sparse_bsr_out_tensor_tensor_intarrayref_intt(void* out, void* self, void* blocksize, void* dense_dim) { LANTERN_FUNCTION_START - return make_raw::Tensor(torch::to_sparse_bsr_out( + return make_raw::Tensor(torch::_to_sparse_bsr_out( from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(blocksize), from_raw::optional::int64_t(dense_dim))); LANTERN_FUNCTION_END } -void* _lantern_to_sparse_bsc_out_tensor_tensor_intarrayref_intt(void* out, void* self, void* blocksize, void* dense_dim) +void* _lantern__to_sparse_bsc_out_tensor_tensor_intarrayref_intt(void* out, void* self, void* blocksize, void* dense_dim) { LANTERN_FUNCTION_START - return make_raw::Tensor(torch::to_sparse_bsc_out( + return make_raw::Tensor(torch::_to_sparse_bsc_out( from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(blocksize), from_raw::optional::int64_t(dense_dim))); LANTERN_FUNCTION_END } @@ -26307,15 +27416,15 @@ void* _lantern_mkldnn_reorder_conv2d_weight_out_tensor_tensor_intarrayref_intarr { LANTERN_FUNCTION_START return make_raw::Tensor(torch::mkldnn_reorder_conv2d_weight_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(dilation), from_raw::int64_t(groups), 
from_raw::IntArrayRef(input_size))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(dilation), from_raw::int64_t(groups), from_raw::optional::IntArrayRef(input_size))); LANTERN_FUNCTION_END } -void* _lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt(void* out, void* self, void* padding, void* stride, void* dilation, void* groups) +void* _lantern_mkldnn_reorder_conv3d_weight_out_tensor_tensor_intarrayref_intarrayref_intarrayref_intt_intarrayref(void* out, void* self, void* padding, void* stride, void* dilation, void* groups, void* input_size) { LANTERN_FUNCTION_START return make_raw::Tensor(torch::mkldnn_reorder_conv3d_weight_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(dilation), from_raw::int64_t(groups))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::IntArrayRef(padding), from_raw::IntArrayRef(stride), from_raw::IntArrayRef(dilation), from_raw::int64_t(groups), from_raw::optional::IntArrayRef(input_size))); LANTERN_FUNCTION_END } @@ -26490,7 +27599,7 @@ void* _lantern__lstm_mps_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_te void* _lantern_lstm_mps_backward_out_tensor_tensorlist_tensorlist_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensorlist_tensorlist_bool_intt_double_bool_bool_bool(void* out0, void* out1, void* out2, void* grad_y, void* grad_hy, void* grad_cy, void* z_state, void* cell_state_fwd, void* input, void* layersOutputs, void* hx, void* params, void* has_biases, void* num_layers, void* dropout, void* train, void* bidirectional, void* batch_first) { LANTERN_FUNCTION_START - torch::lstm_mps_backward_out(from_raw::Tensor(out0), from_raw::TensorList(out1), from_raw::TensorList(out2), from_raw::Tensor(grad_y), from_raw::optional::Tensor(grad_hy), from_raw::optional::Tensor(grad_cy), from_raw::Tensor(z_state), from_raw::Tensor(cell_state_fwd), from_raw::Tensor(input), from_raw::Tensor(layersOutputs), from_raw::TensorList(hx), from_raw::TensorList(params), from_raw::bool_t(has_biases), from_raw::int64_t(num_layers), from_raw::double_t(dropout), from_raw::bool_t(train), from_raw::bool_t(bidirectional), from_raw::bool_t(batch_first)); + torch::lstm_mps_backward_out(from_raw::Tensor(out0), from_raw::TensorList(out1), from_raw::TensorList(out2), from_raw::optional::Tensor(grad_y), from_raw::optional::Tensor(grad_hy), from_raw::optional::Tensor(grad_cy), from_raw::Tensor(z_state), from_raw::Tensor(cell_state_fwd), from_raw::Tensor(input), from_raw::Tensor(layersOutputs), from_raw::TensorList(hx), from_raw::TensorList(params), from_raw::bool_t(has_biases), from_raw::int64_t(num_layers), from_raw::double_t(dropout), from_raw::bool_t(train), from_raw::bool_t(bidirectional), from_raw::bool_t(batch_first)); return NULL; LANTERN_FUNCTION_END } @@ -26951,14 +28060,6 @@ void* _lantern_remainder_out_tensor_scalar_tensor(void* out, void* self, void* o LANTERN_FUNCTION_END } -void* _lantern_argsort_out_tensor_tensor_bool_intt_bool(void* out, void* self, void* stable, void* dim, void* descending) -{ - LANTERN_FUNCTION_START - return make_raw::Tensor(torch::argsort_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::bool_t(stable), from_raw::int64_t(dim), from_raw::bool_t(descending))); - LANTERN_FUNCTION_END -} - void* _lantern_unfold_backward_out_tensor_tensor_intarrayref_intt_intt_intt(void* out, void* grad_in, void* input_sizes, void* 
dim, void* size, void* step) { LANTERN_FUNCTION_START @@ -27015,6 +28116,30 @@ void* _lantern__foreach_add_out_tensorlist_tensorlist_scalar(void* out, void* se LANTERN_FUNCTION_END } +void* _lantern__foreach_add_out_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* other, void* alpha) +{ + LANTERN_FUNCTION_START + torch::_foreach_add_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other), from_raw::Scalar(alpha)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) +{ + LANTERN_FUNCTION_START + torch::_foreach_add_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_add_out_tensorlist_tensorlist_tensor_scalar(void* out, void* self, void* other, void* alpha) +{ + LANTERN_FUNCTION_START + torch::_foreach_add_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Tensor(other), from_raw::Scalar(alpha)); + return NULL; + LANTERN_FUNCTION_END +} + void* _lantern__foreach_sub_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_FUNCTION_START @@ -27023,6 +28148,22 @@ void* _lantern__foreach_sub_out_tensorlist_tensorlist_scalar(void* out, void* se LANTERN_FUNCTION_END } +void* _lantern__foreach_sub_out_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* other, void* alpha) +{ + LANTERN_FUNCTION_START + torch::_foreach_sub_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other), from_raw::Scalar(alpha)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) +{ + LANTERN_FUNCTION_START + torch::_foreach_sub_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + return NULL; + LANTERN_FUNCTION_END +} + void* _lantern__foreach_mul_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_FUNCTION_START @@ -27031,202 +28172,202 @@ void* _lantern__foreach_mul_out_tensorlist_tensorlist_scalar(void* out, void* se LANTERN_FUNCTION_END } -void* _lantern__foreach_div_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) +void* _lantern__foreach_mul_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_div_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Scalar(scalar)); + torch::_foreach_mul_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) +void* _lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_clamp_min_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Scalar(scalar)); + torch::_foreach_mul_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) +void* _lantern__foreach_mul_out_tensorlist_tensorlist_tensor(void* out, void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_clamp_max_out(from_raw::TensorList(out), 
from_raw::TensorList(self), from_raw::Scalar(scalar)); + torch::_foreach_mul_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Tensor(other)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_maximum_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) +void* _lantern__foreach_div_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_FUNCTION_START - torch::_foreach_maximum_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Scalar(scalar)); + torch::_foreach_div_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Scalar(scalar)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_minimum_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) +void* _lantern__foreach_div_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_minimum_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Scalar(scalar)); + torch::_foreach_div_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_add_out_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* other, void* alpha) +void* _lantern__foreach_div_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_add_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other), from_raw::Scalar(alpha)); + torch::_foreach_div_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_sub_out_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* other, void* alpha) +void* _lantern__foreach_div_out_tensorlist_tensorlist_tensor(void* out, void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_sub_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other), from_raw::Scalar(alpha)); + torch::_foreach_div_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Tensor(other)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_mul_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) +void* _lantern__foreach_clamp_max_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_FUNCTION_START - torch::_foreach_mul_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other)); + torch::_foreach_clamp_max_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Scalar(scalar)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_div_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) +void* _lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_div_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other)); + torch::_foreach_clamp_max_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) +void* _lantern__foreach_clamp_max_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_clamp_min_out(from_raw::TensorList(out), 
from_raw::TensorList(self), from_raw::TensorList(other)); + torch::_foreach_clamp_max_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_max_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) +void* _lantern__foreach_clamp_min_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_FUNCTION_START - torch::_foreach_clamp_max_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other)); + torch::_foreach_clamp_min_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Scalar(scalar)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) +void* _lantern__foreach_clamp_min_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_maximum_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other)); + torch::_foreach_clamp_min_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) +void* _lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_minimum_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other)); + torch::_foreach_clamp_min_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_add_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) +void* _lantern__foreach_maximum_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_FUNCTION_START - torch::_foreach_add_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + torch::_foreach_maximum_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Scalar(scalar)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_sub_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) +void* _lantern__foreach_maximum_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_sub_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + torch::_foreach_maximum_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_div_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) +void* _lantern__foreach_maximum_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_div_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + torch::_foreach_maximum_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_mul_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) +void* _lantern__foreach_minimum_out_tensorlist_tensorlist_scalar(void* out, void* self, void* scalar) { LANTERN_FUNCTION_START - 
torch::_foreach_mul_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + torch::_foreach_minimum_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Scalar(scalar)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_min_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) +void* _lantern__foreach_minimum_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* other) { LANTERN_FUNCTION_START - torch::_foreach_clamp_min_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + torch::_foreach_minimum_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(other)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_clamp_max_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) +void* _lantern__foreach_minimum_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_clamp_max_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + torch::_foreach_minimum_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_maximum_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) +void* _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensor1, void* tensor2, void* value) { LANTERN_FUNCTION_START - torch::_foreach_maximum_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + torch::_foreach_addcdiv_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Scalar(value)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_minimum_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* scalars) +void* _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_minimum_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(scalars)); + torch::_foreach_addcdiv_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::vector::Scalar(scalars)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_exp_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_exp_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_addcdiv_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Tensor(scalars)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_zero_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensor1, void* tensor2, void* value) { LANTERN_FUNCTION_START - torch::_foreach_zero_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_addcmul_out(from_raw::TensorList(out), from_raw::TensorList(self), 
from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Scalar(value)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_zero_tensorlist(void* self) +void* _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_FUNCTION_START - return make_raw::TensorList(torch::_foreach_zero( - from_raw::TensorList(self))); + torch::_foreach_addcmul_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::vector::Scalar(scalars)); + return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_sqrt_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(void* out, void* self, void* tensor1, void* tensor2, void* scalars) { LANTERN_FUNCTION_START - torch::_foreach_sqrt_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_addcmul_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Tensor(scalars)); return NULL; LANTERN_FUNCTION_END } @@ -27303,6 +28444,14 @@ void* _lantern__foreach_erfc_out_tensorlist_tensorlist(void* out, void* self) LANTERN_FUNCTION_END } +void* _lantern__foreach_exp_out_tensorlist_tensorlist(void* out, void* self) +{ + LANTERN_FUNCTION_START + torch::_foreach_exp_out(from_raw::TensorList(out), from_raw::TensorList(self)); + return NULL; + LANTERN_FUNCTION_END +} + void* _lantern__foreach_expm1_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START @@ -27319,98 +28468,114 @@ void* _lantern__foreach_floor_out_tensorlist_tensorlist(void* out, void* self) LANTERN_FUNCTION_END } -void* _lantern__foreach_log_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_frac_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_log_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_frac_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_log10_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist(void* out, void* self, void* tensors1, void* weights) { LANTERN_FUNCTION_START - torch::_foreach_log10_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_lerp_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensors1), from_raw::TensorList(weights)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_log1p_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensors1, void* weight) { LANTERN_FUNCTION_START - torch::_foreach_log1p_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_lerp_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensors1), from_raw::Scalar(weight)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_log2_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_lgamma_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_log2_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_lgamma_out(from_raw::TensorList(out), from_raw::TensorList(self)); return 
NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_neg_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_log_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_neg_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_log_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_tan_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_log10_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_tan_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_log10_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_tanh_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_log1p_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_tanh_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_log1p_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_sin_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_log2_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_sin_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_log2_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_sinh_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_max_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_sinh_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_max_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_round_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_neg_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_round_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_neg_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_lgamma_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_norm_out_tensorlist_tensorlist_scalar_scalartype(void* out, void* self, void* ord, void* dtype) { LANTERN_FUNCTION_START - torch::_foreach_lgamma_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_norm_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Scalar(ord), from_raw::optional::ScalarType(dtype)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_frac_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_pow_out_tensorlist_tensorlist_tensorlist(void* out, void* self, void* exponent) { LANTERN_FUNCTION_START - torch::_foreach_frac_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_pow_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(exponent)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__foreach_pow_out_tensorlist_tensorlist_scalar(void* out, void* self, void* exponent) +{ + LANTERN_FUNCTION_START + torch::_foreach_pow_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Scalar(exponent)); + return NULL; + LANTERN_FUNCTION_END +} 
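// --- Note on the wrapper pattern repeated throughout this generated file ---
// Each exported symbol encodes its C++ overload in the name (the _tensorlist /
// _scalar / _arrayrefscalar / ... suffixes), giving every overload a unique
// C-ABI entry point. Arguments and results cross that boundary as opaque
// void* handles: inputs are unwrapped with from_raw::<Type>(ptr), the matching
// torch:: function is called, and results are re-wrapped with
// make_raw::<Type>(...). Void and in-place variants, like the _foreach_*_out
// family here, return NULL instead of a new handle, and the
// LANTERN_FUNCTION_START / LANTERN_FUNCTION_END macros (defined elsewhere in
// lantern) presumably fence the body so C++ exceptions cannot escape into R.
// A minimal, self-contained sketch of that shape follows; SketchTensor and the
// sketch_* helpers are hypothetical stand-ins, not lantern's real conversion
// layer.
#include <cstddef>  // NULL
#include <vector>

struct SketchTensor { std::vector<double> data; };  // stand-in handle target

namespace sketch_from_raw {
inline SketchTensor& Tensor(void* p) { return *static_cast<SketchTensor*>(p); }
inline double double_t(void* p) { return *static_cast<double*>(p); }
}

// Shape of a generated in-place wrapper: unwrap, call, return NULL.
void* sketch_lantern_scale_(void* self, void* factor)
{
  SketchTensor& x = sketch_from_raw::Tensor(self);
  const double f = sketch_from_raw::double_t(factor);
  for (double& v : x.data) v *= f;  // mutate in place, as the *_ ops above do
  return NULL;                      // void-returning variants hand back NULL
}
// --- end of sketch ---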
+ +void* _lantern__foreach_pow_out_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* exponent) +{ + LANTERN_FUNCTION_START + torch::_foreach_pow_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::vector::Scalar(exponent)); return NULL; LANTERN_FUNCTION_END } @@ -27423,107 +28588,107 @@ void* _lantern__foreach_reciprocal_out_tensorlist_tensorlist(void* out, void* se LANTERN_FUNCTION_END } -void* _lantern__foreach_sigmoid_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_round_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_sigmoid_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_round_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_trunc_out_tensorlist_tensorlist(void* out, void* self) +void* _lantern__foreach_sigmoid_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_trunc_out(from_raw::TensorList(out), from_raw::TensorList(self)); + torch::_foreach_sigmoid_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensor1, void* tensor2, void* value) +void* _lantern__foreach_sign_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_addcdiv_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Scalar(value)); + torch::_foreach_sign_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensor1, void* tensor2, void* value) +void* _lantern__foreach_sin_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_addcmul_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Scalar(value)); + torch::_foreach_sin_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, void* tensor1, void* tensor2, void* scalars) +void* _lantern__foreach_sinh_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_addcdiv_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::vector::Scalar(scalars)); + torch::_foreach_sinh_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_addcdiv_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(void* out, void* self, void* tensor1, void* tensor2, void* scalars) +void* _lantern__foreach_sqrt_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_addcdiv_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Tensor(scalars)); + torch::_foreach_sqrt_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_arrayrefscalar(void* out, void* self, 
void* tensor1, void* tensor2, void* scalars) +void* _lantern__foreach_tan_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_addcmul_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::vector::Scalar(scalars)); + torch::_foreach_tan_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_addcmul_out_tensorlist_tensorlist_tensorlist_tensorlist_tensor(void* out, void* self, void* tensor1, void* tensor2, void* scalars) +void* _lantern__foreach_tanh_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_addcmul_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensor1), from_raw::TensorList(tensor2), from_raw::Tensor(scalars)); + torch::_foreach_tanh_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_norm_out_tensorlist_tensorlist_scalar(void* out, void* self, void* ord) +void* _lantern__foreach_trunc_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_norm_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::Scalar(ord)); + torch::_foreach_trunc_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_tensorlist(void* out, void* self, void* tensors1, void* weights) +void* _lantern__foreach_zero_out_tensorlist_tensorlist(void* out, void* self) { LANTERN_FUNCTION_START - torch::_foreach_lerp_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensors1), from_raw::TensorList(weights)); + torch::_foreach_zero_out(from_raw::TensorList(out), from_raw::TensorList(self)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__foreach_lerp_out_tensorlist_tensorlist_tensorlist_scalar(void* out, void* self, void* tensors1, void* weight) +void* _lantern__foreach_zero_tensorlist(void* self) { LANTERN_FUNCTION_START - torch::_foreach_lerp_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(tensors1), from_raw::Scalar(weight)); - return NULL; + return make_raw::TensorList(torch::_foreach_zero( + from_raw::TensorList(self))); LANTERN_FUNCTION_END } -void* _lantern_bucketize_out_tensor_scalar_tensor_bool_bool(void* out, void* self, void* boundaries, void* out_int32, void* right) +void* _lantern__foreach_copy_out_tensorlist_tensorlist_tensorlist_bool(void* out, void* self, void* src, void* non_blocking) { LANTERN_FUNCTION_START - return make_raw::Tensor(torch::bucketize_out( - from_raw::Tensor(out), from_raw::Scalar(self), from_raw::Tensor(boundaries), from_raw::bool_t(out_int32), from_raw::bool_t(right))); + torch::_foreach_copy_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(src), from_raw::bool_t(non_blocking)); + return NULL; LANTERN_FUNCTION_END } -void* _lantern_searchsorted_out_tensor_tensor_scalar_bool_bool_cstringview_tensor(void* out, void* sorted_sequence, void* self, void* out_int32, void* right, void* side, void* sorter) +void* _lantern_bucketize_out_tensor_scalar_tensor_bool_bool(void* out, void* self, void* boundaries, void* out_int32, void* right) { LANTERN_FUNCTION_START - return make_raw::Tensor(torch::searchsorted_out( - from_raw::Tensor(out), from_raw::Tensor(sorted_sequence), from_raw::Scalar(self), from_raw::bool_t(out_int32), 
from_raw::bool_t(right), from_raw::optional::string_view(side), from_raw::optional::Tensor(sorter))); + return make_raw::Tensor(torch::bucketize_out( + from_raw::Tensor(out), from_raw::Scalar(self), from_raw::Tensor(boundaries), from_raw::bool_t(out_int32), from_raw::bool_t(right))); LANTERN_FUNCTION_END } @@ -27651,7 +28816,7 @@ void* _lantern__test_optional_intlist_out_tensor_tensor_intarrayref(void* out, v { LANTERN_FUNCTION_START return make_raw::Tensor(torch::_test_optional_intlist_out( - from_raw::Tensor(out), from_raw::Tensor(values), from_raw::IntArrayRef(addends))); + from_raw::Tensor(out), from_raw::Tensor(values), from_raw::optional::IntArrayRef(addends))); LANTERN_FUNCTION_END } @@ -27659,7 +28824,7 @@ void* _lantern__test_optional_filled_intlist_out_tensor_tensor_intarrayref(void* { LANTERN_FUNCTION_START return make_raw::Tensor(torch::_test_optional_filled_intlist_out( - from_raw::Tensor(out), from_raw::Tensor(values), from_raw::IntArrayRef(addends))); + from_raw::Tensor(out), from_raw::Tensor(values), from_raw::optional::IntArrayRef(addends))); LANTERN_FUNCTION_END } @@ -27987,7 +29152,7 @@ void* _lantern_to_padded_tensor_out_tensor_tensor_double_intarrayref(void* out, { LANTERN_FUNCTION_START return make_raw::Tensor(torch::to_padded_tensor_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::double_t(padding), from_raw::IntArrayRef(output_size))); + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::double_t(padding), from_raw::optional::IntArrayRef(output_size))); LANTERN_FUNCTION_END } @@ -28023,43 +29188,43 @@ void* _lantern__triton_multi_head_attention_out_tensor_tensor_tensor_tensor_intt LANTERN_FUNCTION_END } -void* _lantern__transformer_decoder_only_layer_fwd_out_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_bool_bool_double_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor_tensor(void* out0, void* out1, void* out2, void* src, void* embed_dim, void* num_heads, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* use_gelu, void* norm_first, void* eps, void* norm_weight_1, void* norm_bias_1, void* norm_weight_2, void* norm_bias_2, void* ffn_weight_1, void* ffn_bias_1, void* ffn_weight_2, void* ffn_bias_2, void* mask, void* incr_key, void* incr_value) +void* _lantern__foobar_out_tensor_tensor_bool_bool_bool(void* out, void* self, void* arg1, void* arg2, void* arg3) { LANTERN_FUNCTION_START - return make_raw::tuple(torch::_transformer_decoder_only_layer_fwd_out( - from_raw::Tensor(out0), from_raw::Tensor(out1), from_raw::Tensor(out2), from_raw::Tensor(src), from_raw::int64_t(embed_dim), from_raw::int64_t(num_heads), from_raw::Tensor(qkv_weight), from_raw::Tensor(qkv_bias), from_raw::Tensor(proj_weight), from_raw::Tensor(proj_bias), from_raw::bool_t(use_gelu), from_raw::bool_t(norm_first), from_raw::double_t(eps), from_raw::Tensor(norm_weight_1), from_raw::Tensor(norm_bias_1), from_raw::Tensor(norm_weight_2), from_raw::Tensor(norm_bias_2), from_raw::Tensor(ffn_weight_1), from_raw::Tensor(ffn_bias_1), from_raw::Tensor(ffn_weight_2), from_raw::Tensor(ffn_bias_2), from_raw::optional::Tensor(mask), from_raw::optional::Tensor(incr_key), from_raw::optional::Tensor(incr_value))); + return make_raw::Tensor(torch::_foobar_out( + from_raw::Tensor(out), from_raw::Tensor(self), from_raw::bool_t(arg1), from_raw::bool_t(arg2), from_raw::bool_t(arg3))); LANTERN_FUNCTION_END } -void* 
_lantern__native_decoder_only_multi_head_attention_out_tensor_tensor_tensor_tensor_tensor_tensor_tensor_intt_intt_tensor_tensor_tensor_tensor_tensor_tensor_tensor_bool_bool(void* out0, void* out1, void* out2, void* out3, void* query, void* key, void* value, void* embed_dim, void* num_head, void* qkv_weight, void* qkv_bias, void* proj_weight, void* proj_bias, void* mask, void* incr_key, void* incr_value, void* need_weights, void* average_attn_weights) +void* _lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_FUNCTION_START - return make_raw::tuple(torch::_native_decoder_only_multi_head_attention_out( - from_raw::Tensor(out0), from_raw::Tensor(out1), from_raw::Tensor(out2), from_raw::Tensor(out3), from_raw::Tensor(query), from_raw::Tensor(key), from_raw::Tensor(value), from_raw::int64_t(embed_dim), from_raw::int64_t(num_head), from_raw::Tensor(qkv_weight), from_raw::Tensor(qkv_bias), from_raw::Tensor(proj_weight), from_raw::Tensor(proj_bias), from_raw::optional::Tensor(mask), from_raw::optional::Tensor(incr_key), from_raw::optional::Tensor(incr_value), from_raw::bool_t(need_weights), from_raw::bool_t(average_attn_weights))); + torch::_fused_adam_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(exp_avgs), from_raw::TensorList(exp_avg_sqs), from_raw::TensorList(max_exp_avg_sqs), from_raw::TensorList(state_steps), from_raw::double_t(lr), from_raw::double_t(beta1), from_raw::double_t(beta2), from_raw::double_t(weight_decay), from_raw::double_t(eps), from_raw::bool_t(amsgrad), from_raw::bool_t(maximize), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf)); + return NULL; LANTERN_FUNCTION_END } -void* _lantern__foobar_out_tensor_tensor_bool_bool_bool(void* out, void* self, void* arg1, void* arg2, void* arg3) +void* _lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_FUNCTION_START - return make_raw::Tensor(torch::_foobar_out( - from_raw::Tensor(out), from_raw::Tensor(self), from_raw::bool_t(arg1), from_raw::bool_t(arg2), from_raw::bool_t(arg3))); + return make_raw::tuple(torch::_fused_adam( + from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(exp_avgs), from_raw::TensorList(exp_avg_sqs), from_raw::TensorList(max_exp_avg_sqs), from_raw::TensorList(state_steps), from_raw::double_t(lr), from_raw::double_t(beta1), from_raw::double_t(beta2), from_raw::double_t(weight_decay), from_raw::double_t(eps), from_raw::bool_t(amsgrad), from_raw::bool_t(maximize), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf))); LANTERN_FUNCTION_END } -void* _lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* 
max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) +void* _lantern__fused_adam_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_FUNCTION_START - torch::_fused_adam_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(exp_avgs), from_raw::TensorList(exp_avg_sqs), from_raw::TensorList(max_exp_avg_sqs), from_raw::TensorList(state_steps), from_raw::double_t(lr), from_raw::double_t(beta1), from_raw::double_t(beta2), from_raw::double_t(weight_decay), from_raw::double_t(eps), from_raw::bool_t(amsgrad), from_raw::bool_t(maximize), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf)); + torch::_fused_adam_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(exp_avgs), from_raw::TensorList(exp_avg_sqs), from_raw::TensorList(max_exp_avg_sqs), from_raw::TensorList(state_steps), from_raw::Tensor(lr), from_raw::double_t(beta1), from_raw::double_t(beta2), from_raw::double_t(weight_decay), from_raw::double_t(eps), from_raw::bool_t(amsgrad), from_raw::bool_t(maximize), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf)); return NULL; LANTERN_FUNCTION_END } -void* _lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) +void* _lantern__fused_adam_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) { LANTERN_FUNCTION_START return make_raw::tuple(torch::_fused_adam( - from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(exp_avgs), from_raw::TensorList(exp_avg_sqs), from_raw::TensorList(max_exp_avg_sqs), from_raw::TensorList(state_steps), from_raw::double_t(lr), from_raw::double_t(beta1), from_raw::double_t(beta2), from_raw::double_t(weight_decay), from_raw::double_t(eps), from_raw::bool_t(amsgrad), from_raw::bool_t(maximize), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf))); + from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(exp_avgs), from_raw::TensorList(exp_avg_sqs), from_raw::TensorList(max_exp_avg_sqs), from_raw::TensorList(state_steps), from_raw::Tensor(lr), from_raw::double_t(beta1), from_raw::double_t(beta2), from_raw::double_t(weight_decay), from_raw::double_t(eps), from_raw::bool_t(amsgrad), from_raw::bool_t(maximize), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf))); LANTERN_FUNCTION_END } @@ -28079,4 +29244,68 @@ void* 
_lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorli LANTERN_FUNCTION_END } +void* _lantern__fused_adamw_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) +{ + LANTERN_FUNCTION_START + torch::_fused_adamw_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(exp_avgs), from_raw::TensorList(exp_avg_sqs), from_raw::TensorList(max_exp_avg_sqs), from_raw::TensorList(state_steps), from_raw::Tensor(lr), from_raw::double_t(beta1), from_raw::double_t(beta2), from_raw::double_t(weight_decay), from_raw::double_t(eps), from_raw::bool_t(amsgrad), from_raw::bool_t(maximize), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__fused_adamw_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_tensor_double_double_double_double_bool_bool_tensor_tensor(void* self, void* grads, void* exp_avgs, void* exp_avg_sqs, void* max_exp_avg_sqs, void* state_steps, void* lr, void* beta1, void* beta2, void* weight_decay, void* eps, void* amsgrad, void* maximize, void* grad_scale, void* found_inf) +{ + LANTERN_FUNCTION_START + return make_raw::tuple(torch::_fused_adamw( + from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(exp_avgs), from_raw::TensorList(exp_avg_sqs), from_raw::TensorList(max_exp_avg_sqs), from_raw::TensorList(state_steps), from_raw::Tensor(lr), from_raw::double_t(beta1), from_raw::double_t(beta2), from_raw::double_t(weight_decay), from_raw::double_t(eps), from_raw::bool_t(amsgrad), from_raw::bool_t(maximize), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf))); + LANTERN_FUNCTION_END +} + +void* _lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) +{ + LANTERN_FUNCTION_START + torch::_fused_sgd_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(momentum_buffer_list), from_raw::double_t(weight_decay), from_raw::double_t(momentum), from_raw::double_t(lr), from_raw::double_t(dampening), from_raw::bool_t(nesterov), from_raw::bool_t(maximize), from_raw::bool_t(is_first_step), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf)); + return NULL; + LANTERN_FUNCTION_END +} + +void* _lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_bool_bool_tensor_tensor(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf) +{ + LANTERN_FUNCTION_START + return make_raw::tuple(torch::_fused_sgd( + from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(momentum_buffer_list), from_raw::double_t(weight_decay), from_raw::double_t(momentum), from_raw::double_t(lr), from_raw::double_t(dampening), 
from_raw::bool_t(nesterov), from_raw::bool_t(maximize), from_raw::bool_t(is_first_step), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf)));
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__fused_sgd_out_tensorlist_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(void* out, void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf)
+{
+  LANTERN_FUNCTION_START
+  torch::_fused_sgd_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(momentum_buffer_list), from_raw::double_t(weight_decay), from_raw::double_t(momentum), from_raw::Tensor(lr), from_raw::double_t(dampening), from_raw::bool_t(nesterov), from_raw::bool_t(maximize), from_raw::bool_t(is_first_step), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf));
+  return NULL;
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__fused_sgd_tensorlist_tensorlist_tensorlist_double_double_tensor_double_bool_bool_bool_tensor_tensor(void* self, void* grads, void* momentum_buffer_list, void* weight_decay, void* momentum, void* lr, void* dampening, void* nesterov, void* maximize, void* is_first_step, void* grad_scale, void* found_inf)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::tuple(torch::_fused_sgd(
+      from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(momentum_buffer_list), from_raw::double_t(weight_decay), from_raw::double_t(momentum), from_raw::Tensor(lr), from_raw::double_t(dampening), from_raw::bool_t(nesterov), from_raw::bool_t(maximize), from_raw::bool_t(is_first_step), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf)));
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__fused_adagrad_out_tensorlist_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(void* out, void* self, void* grads, void* state_sums, void* state_steps, void* lr, void* lr_decay, void* weight_decay, void* eps, void* maximize, void* grad_scale, void* found_inf)
+{
+  LANTERN_FUNCTION_START
+  torch::_fused_adagrad_out(from_raw::TensorList(out), from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(state_sums), from_raw::TensorList(state_steps), from_raw::double_t(lr), from_raw::double_t(lr_decay), from_raw::double_t(weight_decay), from_raw::double_t(eps), from_raw::bool_t(maximize), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf));
+  return NULL;
+  LANTERN_FUNCTION_END
+}
+
+void* _lantern__fused_adagrad_tensorlist_tensorlist_tensorlist_tensorlist_double_double_double_double_bool_tensor_tensor(void* self, void* grads, void* state_sums, void* state_steps, void* lr, void* lr_decay, void* weight_decay, void* eps, void* maximize, void* grad_scale, void* found_inf)
+{
+  LANTERN_FUNCTION_START
+  return make_raw::tuple(torch::_fused_adagrad(
+      from_raw::TensorList(self), from_raw::TensorList(grads), from_raw::TensorList(state_sums), from_raw::TensorList(state_steps), from_raw::double_t(lr), from_raw::double_t(lr_decay), from_raw::double_t(weight_decay), from_raw::double_t(eps), from_raw::bool_t(maximize), from_raw::optional::Tensor(grad_scale), from_raw::optional::Tensor(found_inf)));
+  LANTERN_FUNCTION_END
+}
+
 /* Autogen Body -- End */
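
Note on the autogenerated body above: lantern is consumed through a plain C ABI, so C++ overloading is unavailable and each overload is disambiguated by mangling the argument types into the symbol name. That is why the fused optimizers now appear twice, once with a double slot and once with a tensor slot for lr, matching ATen's new tensor-valued learning-rate overloads. A minimal sketch of the wrapper pattern, with a hypothetical op name (from_raw/make_raw are the converter namespaces used throughout):

void* _lantern_example_op_tensor_double(void* self, void* alpha)
{
  LANTERN_FUNCTION_START
  // Unwrap the opaque handles passed across the C boundary, call ATen,
  // and wrap the result back into an opaque handle.
  return make_raw::Tensor(torch::example_op(
      from_raw::Tensor(self), from_raw::double_t(alpha)));
  LANTERN_FUNCTION_END
}

The _out variants instead write through the preallocated out argument and return NULL.
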
diff --git a/src/lantern/src/utils.cpp b/src/lantern/src/utils.cpp
index 4ecb015618..a15db6c4ac 100644
--- a/src/lantern/src/utils.cpp
+++ b/src/lantern/src/utils.cpp
@@ -12,7 +12,7 @@ void *_lantern_vector_int64_t(int64_t *x, size_t x_size) {
   LANTERN_FUNCTION_START
   auto out = std::vector<int64_t>(x, x + x_size);
   return make_raw::vector::int64_t(out);
-  LANTERN_FUNCTION_END
+  LANTERN_FUNCTION_END_RET(0)
 }
 
 void* _lantern_IntArrayRef_get (void* x) {
@@ -25,7 +25,7 @@
 int64_t _lantern_vector_int64_t_size(void *self) {
   LANTERN_FUNCTION_START
   return from_raw::vector::int64_t(self).size();
-  LANTERN_FUNCTION_END
+  LANTERN_FUNCTION_END_RET(0)
 }
 
 int64_t _lantern_vector_int64_t_at(void *self, int64_t index) {
@@ -56,7 +56,7 @@ void _lantern_vector_int64_t_push_back(void *self, int64_t x) {
 double _lantern_vector_double_size(void *self) {
   LANTERN_FUNCTION_START
   return reinterpret_cast<std::vector<double> *>(self)->size();
-  LANTERN_FUNCTION_END
+  LANTERN_FUNCTION_END_RET(0)
 }
 
 double _lantern_vector_double_at(void *self, int64_t index) {
@@ -139,19 +139,19 @@ void *_lantern_bool(bool x) {
 bool _lantern_bool_get(void *x) {
   LANTERN_FUNCTION_START
   return from_raw::bool_t(x);
-  LANTERN_FUNCTION_END
+  LANTERN_FUNCTION_END_RET(false)
 }
 
 int64_t _lantern_int64_t_get(void *x) {
   LANTERN_FUNCTION_START
   return from_raw::int64_t(x);
-  LANTERN_FUNCTION_END
+  LANTERN_FUNCTION_END_RET(0)
 }
 
 double _lantern_double_get(void *x) {
   LANTERN_FUNCTION_START
   return from_raw::double_t(x);
-  LANTERN_FUNCTION_END
+  LANTERN_FUNCTION_END_RET(0)
 }
 
 void *_lantern_vector_get(void *x, int i) {
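
The macro swap above is the substance of this hunk: these accessors return plain scalars rather than void*, so the generic LANTERN_FUNCTION_END, whose catch-all path returns a null pointer, is not a valid fallback for them; LANTERN_FUNCTION_END_RET lets each function supply a typed fallback instead. A sketch of what the pair plausibly expands to — the exact definitions live in the lantern headers, and the error-recording helper named here is an assumption:

#define LANTERN_FUNCTION_START try {
#define LANTERN_FUNCTION_END \
  } catch (const std::exception& ex) { \
    lanternSetLastError(ex.what()); \
    return (void*)nullptr; \
  }
#define LANTERN_FUNCTION_END_RET(fallback) \
  } catch (const std::exception& ex) { \
    lanternSetLastError(ex.what()); \
    return fallback; \
  }

With this shape, a C++ exception is recorded for the caller to inspect instead of unwinding across the C ABI.
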
diff --git a/src/torch_api.cpp b/src/torch_api.cpp
index 00d93ae567..c2f1f0408c 100644
--- a/src/torch_api.cpp
+++ b/src/torch_api.cpp
@@ -171,7 +171,9 @@ XPtrTorchOptionalTensor from_sexp_optional_tensor(SEXP x) {
   }
 }
 
-void delete_optional_tensor(void* x) { lantern_optional_tensor_delete(x); }
+void delete_optional_tensor(void* x) {
+  lantern_optional_tensor_delete(x);
+}
 
 // index tensor
 
@@ -217,7 +219,7 @@ XPtrTorchTensorList from_sexp_tensor_list(SEXP x) {
   }
 
   if (Rf_isNull(x)) {
-    Rcpp::List tmp;  // create an empty list
+    Rcpp::List tmp(0);  // create an empty list
     return cpp_torch_tensor_list(tmp);
   }
 
@@ -1432,7 +1434,10 @@ XPtrTorchIntArrayRef from_sexp_int_array_ref(SEXP x, bool allow_null,
     if (allow_null) {
       return nullptr;
     } else {
-      Rcpp::stop("Expected a list of integers and found NULL.");
+      // this is required by torch_count_nonzero to keep its behavior of not requiring the dim
+      // argument.
+      std::vector<int64_t> vec(0);
+      return XPtrTorchIntArrayRef(lantern_vector_int64_t(vec.data(), vec.size()));
     }
   }
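
The from_sexp_int_array_ref() change above replaces a hard error with an empty IntArrayRef when dim = NULL arrives from the R side. This works because ATen reads a zero-length dim list as "reduce over every dimension", which is exactly how torch_count_nonzero() behaved when called without a dim argument. A small libtorch sketch of the behavior being relied on:

#include <torch/torch.h>

int main() {
  // A NULL dim from R now reaches ATen as a zero-length dim list; for
  // count_nonzero an empty dim list means "count across the whole tensor".
  torch::Tensor x = torch::tensor({{0, 1}, {2, 0}});
  torch::Tensor n = torch::count_nonzero(x, /*dim=*/std::vector<int64_t>{});
  // n holds 2, matching torch_count_nonzero(x) called without dim.
}
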
"Tensor" && argument$is_nullable) { return("XPtrTorchOptionalTensor") } @@ -165,19 +165,19 @@ cpp_parameter_type <- function(argument) { declaration <- "XPtrTorchTensor" } - if (argument$dynamic_type == "bool" && argument$type != "c10::optional") { + if (argument$dynamic_type == "bool" && !argument$is_nullable) { declaration <- "XPtrTorchbool" } - if (argument$dynamic_type == "bool" && argument$type == "c10::optional") { + if (argument$dynamic_type == "bool" && argument$is_nullable) { declaration <- "XPtrTorchoptional_bool" } - if (argument$dynamic_type == "DimnameList" && argument$type != "c10::optional") { + if (argument$dynamic_type == "DimnameList" && !argument$is_nullable) { declaration <- "XPtrTorchDimnameList" } - if (argument$dynamic_type == "DimnameList" && argument$type == "c10::optional") { + if (argument$dynamic_type == "DimnameList" && argument$is_nullable) { declaration <- "XPtrTorchOptionalDimnameList" } @@ -185,35 +185,35 @@ cpp_parameter_type <- function(argument) { declaration <- "XPtrTorchTensorList" } - if (argument$dynamic_type == "IntArrayRef" && argument$type %in% c("c10::optional", "OptionalIntArrayRef")) { + if (argument$dynamic_type == "IntArrayRef" && ((argument$type %in% c("c10::optional", "OptionalIntArrayRef")) || argument$is_nullable)) { return("XPtrTorchOptionalIntArrayRef") } - if (argument$dynamic_type == "IntArrayRef" && argument$type != "c10::optional") { + if (argument$dynamic_type == "IntArrayRef" && !argument$is_nullable) { declaration <- "XPtrTorchIntArrayRef" } - if (argument$dynamic_type == "ArrayRef" && argument$type == "c10::optional>") { + if (argument$dynamic_type == "ArrayRef" && argument$is_nullable) { declaration <- "XPtrTorchOptionalDoubleArrayRef" } - if (argument$dynamic_type == "ArrayRef"&& argument$type != "c10::optional>") { + if (argument$dynamic_type == "ArrayRef" && !argument$is_nullable) { declaration <- "std::vector" } - if (argument$dynamic_type == "int64_t" && !argument$type == "c10::optional") { + if (argument$dynamic_type == "int64_t" && !argument$is_nullable) { declaration <- "XPtrTorchint64_t" } - if (argument$dynamic_type == "int64_t" && argument$type == "c10::optional") { + if (argument$dynamic_type == "int64_t" && argument$is_nullable) { declaration <- "XPtrTorchoptional_int64_t" } - if (argument$dynamic_type == "double" && argument$type == "c10::optional") { + if (argument$dynamic_type == "double" && argument$is_nullable) { declaration <- "XPtrTorchOptionaldouble" } - if (argument$dynamic_type == "double" && !argument$type == "c10::optional") { + if (argument$dynamic_type == "double" && !argument$is_nullable) { declaration <- "XPtrTorchdouble" } @@ -229,27 +229,27 @@ cpp_parameter_type <- function(argument) { declaration <- "XPtrTorchGenerator" } - if (argument$dynamic_type == "Generator" && argument$type != "c10::optional") { + if (argument$dynamic_type == "Generator" && !argument$is_nullable) { declaration <- "XPtrTorchGenerator" } - if (argument$dynamic_type == "Generator" && argument$type == "c10::optional") { + if (argument$dynamic_type == "Generator" && argument$is_nullable) { declaration <- "XPtrTorchOptionalGenerator" } - if (argument$dynamic_type == "ScalarType" && argument$type != "c10::optional") { + if (argument$dynamic_type == "ScalarType" && !argument$is_nullable) { declaration <- "XPtrTorchDtype" } - if (argument$dynamic_type == "ScalarType" && argument$type == "c10::optional") { + if (argument$dynamic_type == "ScalarType" && argument$is_nullable) { declaration <- "XPtrTorchoptional_scalar_type" } - if 
-  if (argument$dynamic_type == "Scalar" && !stringr::str_detect(argument$type, "optional")) {
+  if (argument$dynamic_type == "Scalar" && !argument$is_nullable) {
     declaration <- "XPtrTorchScalar"
   }
 
-  if (argument$dynamic_type == "Scalar" && stringr::str_detect(argument$type, "optional")) {
+  if (argument$dynamic_type == "Scalar" && argument$is_nullable) {
     declaration <- "XPtrTorchoptional_scalar"
   }
@@ -261,19 +261,19 @@ cpp_parameter_type <- function(argument) {
     declaration <- "std::vector"
   }
 
-  if (argument$dynamic_type == "MemoryFormat" && argument$type != "c10::optional<at::MemoryFormat>") {
+  if (argument$dynamic_type == "MemoryFormat" && !argument$is_nullable) {
     declaration <- "XPtrTorchMemoryFormat"
   }
 
-  if (argument$dynamic_type == "MemoryFormat" && argument$type == "c10::optional<at::MemoryFormat>") {
+  if (argument$dynamic_type == "MemoryFormat" && argument$is_nullable) {
     declaration <- "XPtrTorchoptional_memory_format"
   }
 
-  if (argument$dynamic_type == "std::string" && argument$type != "c10::optional<std::string>") {
+  if (argument$dynamic_type == "std::string" && !argument$is_nullable) {
     declaration <- "XPtrTorchstring"
   }
 
-  if (argument$dynamic_type == "std::string" && argument$type == "c10::optional<std::string>") {
+  if (argument$dynamic_type == "std::string" && argument$is_nullable) {
     declaration <- "XPtrTorchoptional_string"
   }
@@ -301,11 +301,11 @@ cpp_parameter_type <- function(argument) {
     declaration <- "XPtrTorchvector_Scalar"
   }
 
-  if (argument$dynamic_type == "c10::string_view" && argument$type == "c10::optional<c10::string_view>") {
+  if (argument$dynamic_type == "c10::string_view" && argument$is_nullable) {
     declaration <- "XPtrTorchoptional_string_view"
   }
 
-  if (argument$dynamic_type == "c10::string_view" && argument$type != "c10::optional<c10::string_view>") {
+  if (argument$dynamic_type == "c10::string_view" && !argument$is_nullable) {
     declaration <- "XPtrTorchstring_view"
   }
@@ -325,6 +325,14 @@ cpp_parameter_type <- function(argument) {
     declaration <- "XPtrTorchTensorList"
   }
 
+  if (argument$dynamic_type == "const c10::List<::std::optional<at::Tensor>> &") {
+    declaration <- "XPtrTorchOptionalTensorList"
+  }
+
+  if (argument$dynamic_type == "DeviceIndex") {
+    declaration <- "int"
+  }
+
   # FIXME: Stop if argument$dynamic_type is not handled
   if (!exists("declaration")) {
     stop(paste(argument$dynamic_type, "is not handled!"))
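
For context on where these declaration strings end up: cpp_parameter_type() chooses the C++ parameter type of each argument in the generated Rcpp bindings, so keying the dispatch on the YAML is_nullable flag rather than on exact type strings is what lets a single generator accept both the old c10::optional<T> and the new ::std::optional<T> spellings found in newer Declarations.yaml files. Illustratively, and with a hypothetical binding name, the chosen strings surface as signatures of this shape:

// A non-nullable Tensor argument maps to XPtrTorchTensor, while a Scalar
// argument with is_nullable == TRUE maps to XPtrTorchoptional_scalar.
XPtrTorchTensor cpp_torch_namespace_clamp(
    XPtrTorchTensor self,
    XPtrTorchoptional_scalar min,
    XPtrTorchoptional_scalar max);
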
glue::glue("{argument$name}.get()") } - if (argument$dynamic_type == "double" && argument$type == "c10::optional") { + if (argument$dynamic_type == "double" && argument$is_nullable) { result <- glue::glue("{argument$name}.get()") } @@ -496,6 +504,14 @@ cpp_argument_transform <- function(argument) { result <- glue::glue("{argument$name}.get()") } + if (argument$dynamic_type == "const c10::List<::std::optional> &") { + result <- glue::glue("{argument$name}.get()") + } + + if (argument$dynamic_type == "DeviceIndex") { + result <- glue::glue("{argument$name}") + } + # FIXME: Stop if argument$dynamic_type is not handled if (!exists("result")) { stop(paste(argument$dynamic_type, "is not handled!")) @@ -667,10 +683,21 @@ SKIP_R_BINDIND <- c( "_use_cudnn_rnn_flatten_weight", "is_vulkan_available", "_test_ambiguous_defaults", - "_test_string_default" + "_test_string_default", + "_cufft_get_plan_cache_size", + "_cufft_get_plan_cache_max_size", + "_cufft_set_plan_cache_max_size", + "_cufft_clear_plan_cache", + "sym_size", + "sym_numel", + "sym_stride", + "sym_storage_offset", + "_make_dep_token" ) -SKIP_CPP_BINDING <- c() +SKIP_CPP_BINDING <- c( + "_cufft_get_plan_cache_size" +) cpp <- function(path) { diff --git a/tools/torchgen/R/r.R b/tools/torchgen/R/r.R index 5579bb2982..8623171da6 100644 --- a/tools/torchgen/R/r.R +++ b/tools/torchgen/R/r.R @@ -237,6 +237,22 @@ r_argument_default <- function(default) { if (default == "\"reflect\"") return("\"reflect\"") + + if (default == "::std::nullopt") { + return("NULL") + } + + if (default == "c10::MemoryFormContiguous") { + return("torch_contiguous_format()") + } + + if (default == '""') { + return('""') + } + + if (default == '"right"') { + return('"right"') + } browser() } diff --git a/tools/torchgen/R/utils.R b/tools/torchgen/R/utils.R index 1030fd3e8d..9a4183ba59 100644 --- a/tools/torchgen/R/utils.R +++ b/tools/torchgen/R/utils.R @@ -7,7 +7,7 @@ #' @export declarations <- function() { - version <- getOption("torchgen.version", default = "2.0.1") + version <- getOption("torchgen.version", default = "2.5.1") path <- getOption("torchgen.path") if (is.null(path)) { diff --git a/tools/torchgen/inst/declaration/Declarations-2.0.1.yaml b/tools/torchgen/inst/declaration/Declarations-2.5.1.yaml similarity index 89% rename from tools/torchgen/inst/declaration/Declarations-2.0.1.yaml rename to tools/torchgen/inst/declaration/Declarations-2.5.1.yaml index 88688e1499..eeed871255 100644 --- a/tools/torchgen/inst/declaration/Declarations-2.0.1.yaml +++ b/tools/torchgen/inst/declaration/Declarations-2.5.1.yaml @@ -396,20 +396,20 @@ dynamic_type: at::Tensor is_nullable: true name: gradient - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: retain_graph - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false name: create_graph type: bool - schema_order_cpp_signature: void (const at::Tensor &, at::TensorList, const c10::optional &, c10::optional, bool) + schema_order_cpp_signature: void (const at::Tensor &, at::TensorList, const ::std::optional &, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -426,13 +426,13 @@ dynamic_type: at::Tensor is_nullable: true name: gradient - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: 
retain_graph - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -1018,8 +1018,8 @@ dynamic_type: at::DimnameList is_nullable: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -1030,7 +1030,7 @@ dynamic_type: at::DimnameList is_nullable: true name: names - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -1063,8 +1063,8 @@ dynamic_type: at::DimnameList is_nullable: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -1075,7 +1075,7 @@ dynamic_type: at::DimnameList is_nullable: true name: names - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -1304,12 +1304,206 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _assert_async + operator_name: _assert_async + overload_name: msg + manual_kernel_registration: false + category_override: '' + schema_string: aten::_assert_async.msg(Tensor self, str assert_msg) -> () + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + schema_order_cpp_signature: void (const at::Tensor &, c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _assert_scalar + operator_name: _assert_scalar + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_assert_scalar(Scalar self, str assert_msg) -> () + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + schema_order_cpp_signature: void (const at::Scalar &, c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _functional_assert_scalar + operator_name: _functional_assert_scalar + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_functional_assert_scalar(Scalar self, str assert_msg, Tensor dep_token) -> Tensor + arguments: + - annotation: null + dynamic_type: const 
at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Scalar &, c10::string_view, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _functional_assert_async + operator_name: _functional_assert_async + overload_name: msg + manual_kernel_registration: false + category_override: '' + schema_string: aten::_functional_assert_async.msg(Tensor self, str assert_msg, Tensor dep_token) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: assert_msg + type: c10::string_view + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _assert_tensor_metadata operator_name: _assert_tensor_metadata overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? dtype=None) -> () + schema_string: aten::_assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? 
dtype=None) -> () arguments: - annotation: null dynamic_type: at::Tensor @@ -1317,24 +1511,24 @@ name: a type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: size type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: stride type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: void (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: void (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -1342,23 +1536,23 @@ name: a type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: size type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: stride type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -1372,6 +1566,362 @@ with_gil: false deprecated: false has_math_kernel: true +- name: _print + operator_name: _print + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_print(str s) -> () + arguments: + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: s + type: c10::string_view + schema_order_cpp_signature: void (c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: s + type: c10::string_view + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sym_constrain_range + operator_name: sym_constrain_range + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sym_constrain_range(Scalar size, *, int? min=None, int? 
max=None) -> () + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: min + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: max + type: ::std::optional + schema_order_cpp_signature: void (const at::Scalar &, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: min + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: max + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sym_constrain_range_for_size + operator_name: sym_constrain_range_for_size + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sym_constrain_range_for_size(Scalar size, *, int? min=None, int? max=None) -> () + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: min + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: max + type: ::std::optional + schema_order_cpp_signature: void (const at::Scalar &, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: min + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: max + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _functional_sym_constrain_range + operator_name: _functional_sym_constrain_range + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_functional_sym_constrain_range(Scalar size, int? min, int? 
max, Tensor dep_token) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: min + type: ::std::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: max + type: ::std::optional + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Scalar &, ::std::optional, ::std::optional, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: min + type: ::std::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: max + type: ::std::optional + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _functional_sym_constrain_range_for_size + operator_name: _functional_sym_constrain_range_for_size + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_functional_sym_constrain_range_for_size(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: min + type: ::std::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: max + type: ::std::optional + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Scalar &, ::std::optional, ::std::optional, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: size + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: min + type: ::std::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: max + type: ::std::optional + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dep_token + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _make_dep_token + operator_name: _make_dep_token + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: ::std::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: ::std::optional + schema_order_cpp_signature: at::Tensor (::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: refine_names operator_name: refine_names overload_name: '' @@ -1929,7 +2479,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight_buf - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -1939,7 +2489,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -1989,8 +2539,8 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const c10::optional &, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const ::std::optional &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -2011,7 +2561,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight_buf - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -2021,7 +2571,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -2071,7 +2621,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -2136,7 +2686,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: 
false @@ -2146,17 +2696,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -2206,7 +2756,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -2217,7 +2767,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, const at::Tensor &, ::std::array) + schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &, const at::Tensor &, ::std::array) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -2248,7 +2798,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -2258,17 +2808,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -2318,7 +2868,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -2382,7 +2932,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (double, bool, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (double, bool, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: double @@ -2400,33 +2950,33 @@ name: dropout_seed type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: 
false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -2496,12 +3046,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -2514,11 +3064,11 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -2614,8 +3164,8 @@ dynamic_type: bool is_nullable: true name: train - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -2631,7 +3181,7 @@ dynamic_type: bool is_nullable: true name: train - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -2742,8 +3292,8 @@ dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, const at::Tensor &, int64_t, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, const at::Tensor &, int64_t, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -2774,7 +3324,7 @@ dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -4018,13 +4568,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -4032,12 +4582,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -6045,7 +6595,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor + schema_string: aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -6100,7 +6650,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor + schema_string: aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -6257,6 +6807,51 @@ with_gil: false deprecated: false 
has_math_kernel: true +- name: _test_functorch_fallback + operator_name: _test_functorch_fallback + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_test_functorch_fallback(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: all operator_name: all overload_name: dim @@ -6315,6 +6910,66 @@ with_gil: false deprecated: false has_math_kernel: false +- name: all + operator_name: all + overload_name: dims + manual_kernel_registration: false + category_override: '' + schema_string: aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + type: at::OptionalIntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + type: at::OptionalIntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: all_out operator_name: all overload_name: out @@ -6386,6 +7041,79 @@ with_gil: false deprecated: false has_math_kernel: false +- name: all_out + operator_name: all + overload_name: dims_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + type: at::OptionalIntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + type: at::OptionalIntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: all operator_name: all overload_name: dimname @@ -6655,37 +7383,31 @@ with_gil: false deprecated: false has_math_kernel: false -- name: any_out +- name: any operator_name: any - overload_name: out + overload_name: dims manual_kernel_registration: false category_override: '' - schema_string: aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - annotation: null - dynamic_type: int64_t - is_nullable: false + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true name: dim - type: int64_t + type: at::OptionalIntArrayRef - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -6693,32 +7415,27 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: int64_t - is_nullable: false + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true name: dim - type: int64_t + type: at::OptionalIntArrayRef - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - - allocate: true - annotation: a! 
- dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & method_of: - Type + - Tensor - namespace mode: native python_module: '' returns: - dynamic_type: at::Tensor - name: out - type: at::Tensor & + name: result + type: at::Tensor inplace: false is_factory_method: false abstract: true @@ -6726,30 +7443,37 @@ with_gil: false deprecated: false has_math_kernel: false -- name: any +- name: any_out operator_name: any - overload_name: dimname + overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor + schema_string: aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - annotation: null - dynamic_type: at::Dimname + dynamic_type: int64_t is_nullable: false name: dim - type: at::Dimname + type: int64_t - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, bool) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -6757,39 +7481,45 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: at::Dimname + dynamic_type: int64_t is_nullable: false name: dim - type: at::Dimname + type: int64_t - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & method_of: - Type - - Tensor - namespace mode: native python_module: '' returns: - dynamic_type: at::Tensor - name: result - type: at::Tensor + name: out + type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: any_out operator_name: any - overload_name: dimname_out + overload_name: dims_out manual_kernel_registration: false category_override: '' - schema_string: aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
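The new `dims` overloads added above for `aten::all` and `aten::any` take a nullable dimension list (`int[]? dim=None`, surfaced in C++ as `at::OptionalIntArrayRef`) instead of a single `int`, so several axes can be reduced in one call. A rough usage sketch, assuming the C++ API matches the schema strings above:

```cpp
#include <ATen/ATen.h>

void all_any_dims_demo() {
  at::Tensor mask = at::rand({2, 3, 4}) > 0.5;  // boolean tensor

  // Reduce over dims 0 and 2 at once, keeping the reduced dimensions.
  at::Tensor every = at::all(mask, /*dim=*/at::OptionalIntArrayRef({0, 2}),
                             /*keepdim=*/true);

  // Passing std::nullopt (the new default) reduces over all dimensions.
  at::Tensor some = at::any(mask, /*dim=*/::std::nullopt, /*keepdim=*/false);
}
```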
@@ -6804,17 +7534,18 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: at::Dimname - is_nullable: false + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true name: dim - type: at::Dimname + type: at::OptionalIntArrayRef - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -6822,10 +7553,140 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: at::Dimname - is_nullable: false + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true name: dim - type: at::Dimname + type: at::OptionalIntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: any + operator_name: any + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: any_out + operator_name: any + overload_name: dimname_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname - annotation: null default: false dynamic_type: bool @@ -6874,7 +7735,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -6882,33 +7743,33 @@ name: end type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -6949,7 +7810,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -6962,33 +7823,33 @@ name: end type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -7035,7 +7896,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, const at::Scalar &, c10::optional, c10::optional, 
c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -7054,33 +7915,33 @@ name: step type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -7275,18 +8136,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -7294,11 +8155,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -7342,18 +8203,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -7361,11 +8222,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -7408,18 +8269,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -7427,11 +8288,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true 
name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -7475,18 +8336,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -7494,11 +8355,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -8277,12 +9138,12 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -8300,11 +9161,11 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -8345,12 +9206,12 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -8368,11 +9229,11 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -9367,7 +10228,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -9375,33 +10236,33 @@ name: window_length type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -9442,7 +10303,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, bool, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -9455,33 +10316,33 @@ name: periodic type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -9514,22 +10375,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -9550,7 +10411,7 @@ is_nullable: false name: cudnn_enabled type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const 
c10::optional &, bool, double, double, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -9561,22 +10422,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -9629,12 +10490,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -9660,7 +10521,7 @@ is_nullable: false name: output_zero_point type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, double, double, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double, double, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -9671,12 +10532,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -9734,22 +10595,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -9770,7 +10631,7 @@ is_nullable: false name: cudnn_enabled type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -9781,22 +10642,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: 
at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -9871,27 +10732,27 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var_transform - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -9912,7 +10773,7 @@ is_nullable: false name: reservedSpace type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, ::std::array, const at::Tensor &) + schema_order_cpp_signature: ::std::tuple (int64_t, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, ::std::array, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -9933,27 +10794,27 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var_transform - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -10009,13 +10870,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10023,12 +10884,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -10066,13 +10927,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - 
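The `batch_norm`/`native_batch_norm` hunks above show that `weight`, `bias`, `running_mean`, and `running_var` are nullable tensors, now typed `const ::std::optional<at::Tensor> &`. From C++ an absent tensor is simply an empty optional; a hedged sketch:

```cpp
#include <ATen/ATen.h>

at::Tensor plain_batch_norm(const at::Tensor& input) {
  // No affine parameters and no running statistics: every nullable
  // argument is passed as an empty ::std::optional<at::Tensor>.
  return at::batch_norm(input,
                        /*weight=*/::std::nullopt,
                        /*bias=*/::std::nullopt,
                        /*running_mean=*/::std::nullopt,
                        /*running_var=*/::std::nullopt,
                        /*training=*/true,
                        /*momentum=*/0.1,
                        /*eps=*/1e-5,
                        /*cudnn_enabled=*/false);
}
```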
schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10080,12 +10941,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -10127,13 +10988,13 @@ name: p type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -10146,12 +11007,12 @@ name: p type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -10187,13 +11048,13 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -10207,12 +11068,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -10247,13 +11108,13 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10266,12 +11127,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -10316,8 +11177,8 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10339,7 +11200,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -10378,14 +11239,14 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10402,7 +11263,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -10454,14 +11315,14 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10478,7 +11339,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -10535,14 +11396,14 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor 
(const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10564,7 +11425,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -10621,14 +11482,14 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10650,7 +11511,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -10702,20 +11563,20 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: pos_weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10732,13 +11593,13 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: pos_weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -10778,14 +11639,14 @@ dynamic_type: at::Tensor is_nullable: true name: weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t is_nullable: false name: minlength type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -10797,7 +11658,7 @@ dynamic_type: at::Tensor is_nullable: true name: weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ -11241,12 +12102,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: logical_not - operator_name: logical_not +- name: _lazy_clone + operator_name: _lazy_clone overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::logical_not(Tensor self) -> Tensor + schema_string: aten::_lazy_clone(Tensor self) -> 
Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -11277,119 +12138,25 @@ with_gil: false deprecated: false has_math_kernel: false -- name: logical_not_ - operator_name: logical_not_ - overload_name: '' - manual_kernel_registration: false - category_override: '' - schema_string: aten::logical_not_(Tensor(a!) self) -> Tensor(a!) - arguments: - - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: self - type: at::Tensor & - schema_order_cpp_signature: at::Tensor & (at::Tensor &) - schema_order_arguments: - - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: self - type: at::Tensor & - method_of: - - Type - - Tensor - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: self - type: at::Tensor & - inplace: true - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: logical_not_out +- name: logical_not operator_name: logical_not - overload_name: out - manual_kernel_registration: false - category_override: '' - schema_string: aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: logical_xor - operator_name: logical_xor overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::logical_xor(Tensor self, Tensor other) -> Tensor + schema_string: aten::logical_not(Tensor self) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: other - type: const at::Tensor & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_cpp_signature: at::Tensor (const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: other - type: const at::Tensor & method_of: - Type - Tensor @@ -11407,35 +12174,25 @@ with_gil: false deprecated: false has_math_kernel: false -- name: logical_xor_ - operator_name: logical_xor_ +- name: logical_not_ + operator_name: logical_not_ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!) + schema_string: aten::logical_not_(Tensor(a!) self) -> Tensor(a!) arguments: - annotation: a! 
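`aten::_lazy_clone` is new in this regenerated file; upstream it implements a copy-on-write clone, where the clone shares storage until one side is written. The materialization behavior is an upstream implementation detail, so treat this sketch as an assumption about semantics rather than something the schema above guarantees:

```cpp
#include <ATen/ATen.h>

void lazy_clone_demo() {
  at::Tensor a = at::arange(4, at::kFloat);
  at::Tensor b = at::_lazy_clone(a);  // no data copy yet (copy-on-write)
  b.add_(1.0);                        // first write materializes b's storage
  // a is unchanged; b now owns its own data.
}
```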
dynamic_type: at::Tensor is_nullable: false name: self type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: other - type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_cpp_signature: at::Tensor & (at::Tensor &) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor is_nullable: false name: self type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: other - type: const at::Tensor & method_of: - Type - Tensor @@ -11452,12 +12209,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: logical_xor_out - operator_name: logical_xor +- name: logical_not_out + operator_name: logical_not overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -11471,23 +12228,163 @@ is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: other - type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: other - type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_xor + operator_name: logical_xor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_xor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_xor_ + operator_name: logical_xor_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_xor_out + operator_name: logical_xor + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & - allocate: true annotation: a! dynamic_type: at::Tensor @@ -11830,7 +12727,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -11838,33 +12735,33 @@ name: window_length type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -11905,7 +12802,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, bool, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -11918,33 +12815,33 @@ name: periodic type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType 
is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -13371,18 +14268,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -13390,17 +14287,17 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -13435,14 +14332,14 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -13454,13 +14351,13 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -13491,18 +14388,18 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -13510,17 +14407,17 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -13555,14 +14452,14 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -13574,13 +14471,13 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -13618,18 +14515,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -13637,17 +14534,17 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -13695,14 +14592,14 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -13714,13 +14611,13 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! dynamic_type: at::Tensor @@ -14361,18 +15258,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -14380,17 +15277,17 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -14425,14 +15322,14 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -14444,13 +15341,13 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -14481,18 +15378,18 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const 
c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -14500,17 +15397,17 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -14545,14 +15442,14 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -14564,13 +15461,13 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -14608,18 +15505,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -14627,17 +15524,17 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -14685,14 +15582,14 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -14704,13 +15601,13 @@ dynamic_type: at::Tensor is_nullable: true name: min - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: max - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! dynamic_type: at::Tensor @@ -15047,7 +15944,7 @@ name: self type: const at::Tensor & - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: false kwarg_only: true @@ -15061,7 +15958,7 @@ name: self type: const at::Tensor & - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: false kwarg_only: true @@ -15088,7 +15985,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor + schema_string: aten::convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -15104,7 +16001,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15135,7 +16032,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -15151,7 +16048,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15203,7 +16100,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + schema_string: aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? 
bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -15344,7 +16241,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor + schema_string: aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -15360,7 +16257,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15391,7 +16288,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -15407,7 +16304,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15459,7 +16356,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + schema_string: aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) arguments: - annotation: null dynamic_type: at::Tensor @@ -15593,7 +16490,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor + schema_string: aten::_convolution(Tensor input, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -15609,7 +16506,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15660,7 +16557,7 @@ is_nullable: false name: allow_tf32 type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, bool, bool, bool, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, bool, bool, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -15676,7 +16573,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15748,7 +16645,7 @@ overload_name: deprecated manual_kernel_registration: false category_override: '' - schema_string: aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor + schema_string: aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, int[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -15764,7 +16661,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15810,7 +16707,7 @@ is_nullable: false name: cudnn_enabled type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, bool, bool, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, bool, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -15826,7 +16723,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15893,7 +16790,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor + schema_string: aten::_convolution_mode(Tensor input, Tensor weight, Tensor? 
bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -15909,7 +16806,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15930,7 +16827,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -15946,7 +16843,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -15988,23 +16885,23 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + schema_string: aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor is_nullable: true name: ggI - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: ggW - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: ggb - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -16055,23 +16952,23 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple (const c10::optional &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, ::std::array) + schema_order_cpp_signature: ::std::tuple (const ::std::optional &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, ::std::array) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: true name: ggI - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: ggW - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: ggb - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -16149,7 +17046,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::conv1d(Tensor input, Tensor weight, Tensor? 
bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor + schema_string: aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -16166,7 +17063,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16194,7 +17091,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -16211,7 +17108,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16260,7 +17157,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor + schema_string: aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, SymInt groups=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -16277,7 +17174,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16305,7 +17202,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -16322,7 +17219,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16371,7 +17268,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor + schema_string: aten::conv3d(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, SymInt groups=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -16388,7 +17285,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16416,7 +17313,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -16433,7 +17330,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16482,7 +17379,7 @@ overload_name: padding manual_kernel_registration: false category_override: '' - schema_string: aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor + schema_string: aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, str padding="valid", SymInt[1] dilation=1, SymInt groups=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -16499,7 +17396,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16526,7 +17423,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -16543,7 +17440,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16591,7 +17488,7 @@ overload_name: padding manual_kernel_registration: false category_override: '' - schema_string: aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor + schema_string: aten::conv2d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[2] stride=1, str padding="valid", SymInt[2] dilation=1, SymInt groups=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -16608,7 +17505,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16635,7 +17532,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -16652,7 +17549,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16700,7 +17597,7 @@ overload_name: padding manual_kernel_registration: false category_override: '' - schema_string: aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor + schema_string: aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, str padding="valid", SymInt[3] dilation=1, SymInt groups=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -16717,7 +17614,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16744,7 +17641,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -16761,7 +17658,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -16957,7 +17854,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor + schema_string: aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, SymInt groups=1, SymInt[1] dilation=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -16974,7 +17871,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -17009,7 +17906,7 @@ name: dilation size: 1 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -17026,7 +17923,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -17082,7 +17979,7 @@ overload_name: input manual_kernel_registration: false category_override: '' - schema_string: aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor + schema_string: aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -17099,7 +17996,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -17134,7 +18031,7 @@ name: dilation size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -17151,7 +18048,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -17207,7 +18104,7 @@ overload_name: input manual_kernel_registration: false category_override: '' - schema_string: aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor + schema_string: aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -17224,7 +18121,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -17259,7 +18156,7 @@ name: dilation size: 3 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -17276,7 +18173,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -17923,12 +18820,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -17936,11 +18833,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -17983,15 +18880,15 @@ is_nullable: true kwarg_only: true name: fweights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: aweights - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -18011,14 +18908,14 @@ is_nullable: true kwarg_only: true name: fweights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: aweights - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -18245,17 +19142,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -18271,7 +19168,7 @@ is_nullable: false name: epsilon type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, 
const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -18287,17 +19184,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -18364,22 +19261,22 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -18390,7 +19287,7 @@ is_nullable: false name: reserveSpace type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, double, const at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, double, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -18411,22 +19308,22 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -18464,7 +19361,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor + schema_string: aten::cudnn_convolution(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -18574,13 +19471,20 @@ with_gil: false deprecated: false has_math_kernel: false -- name: cudnn_convolution_transpose - operator_name: cudnn_convolution_transpose - overload_name: '' +- name: cudnn_convolution_out + operator_name: cudnn_convolution + overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool 
benchmark, bool deterministic, bool allow_tf32) -> Tensor + schema_string: aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -18596,11 +19500,6 @@ is_nullable: false name: padding type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: output_padding - type: at::IntArrayRef - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -18631,7 +19530,7 @@ is_nullable: false name: allow_tf32 type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -18648,11 +19547,6 @@ is_nullable: false name: padding type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: output_padding - type: at::IntArrayRef - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -18683,6 +19577,13 @@ is_nullable: false name: allow_tf32 type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & method_of: - Type - namespace @@ -18690,8 +19591,8 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: result - type: at::Tensor + name: out + type: at::Tensor & inplace: false is_factory_method: false abstract: true @@ -18699,12 +19600,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _mps_convolution_transpose - operator_name: _mps_convolution_transpose +- name: cudnn_convolution_transpose + operator_name: cudnn_convolution_transpose overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_mps_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor + schema_string: aten::cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -18741,76 +19642,201 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) - schema_order_arguments: - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: weight - type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: padding - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: output_padding - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: bool is_nullable: false - name: stride - 
type: at::IntArrayRef + name: benchmark + type: bool - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: bool is_nullable: false - name: dilation - type: at::IntArrayRef + name: deterministic + type: bool - annotation: null - dynamic_type: int64_t + dynamic_type: bool is_nullable: false - name: groups - type: int64_t - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: result - type: at::Tensor - inplace: false - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: mps_convolution_transpose_backward - operator_name: mps_convolution_transpose_backward - overload_name: '' - manual_kernel_registration: false - category_override: '' - schema_string: aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor) - arguments: + name: allow_tf32 + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool) + schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: grad_output - type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: allow_tf32 + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _mps_convolution_transpose + operator_name: _mps_convolution_transpose + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_mps_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - 
annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mps_convolution_transpose_backward + operator_name: mps_convolution_transpose_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -18917,7 +19943,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + schema_string: aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -18933,7 +19959,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -18954,7 +19980,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -18970,7 +19996,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -19012,7 +20038,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + schema_string: aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -19033,12 +20059,12 @@ dynamic_type: const at::Scalar & is_nullable: true name: alpha - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -19059,7 +20085,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -19080,12 +20106,12 @@ dynamic_type: const at::Scalar & is_nullable: true name: alpha - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -19967,13 +20993,13 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -19986,12 +21012,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional 
method_of: - Type - Tensor @@ -20027,13 +21053,13 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -20046,12 +21072,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -20093,13 +21119,13 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -20112,12 +21138,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -20159,13 +21185,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -20178,12 +21204,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -20219,13 +21245,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -20238,12 +21264,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -20285,13 +21311,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -20304,12 +21330,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -20416,13 +21442,13 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -20435,12 +21461,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -20476,13 +21502,13 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -20495,12 +21521,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -20542,13 +21568,13 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -20561,12 +21587,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -20608,13 +21634,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -20627,12 +21653,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -20668,13 +21694,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -20687,12 +21713,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -20734,13 +21760,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -20753,12 +21779,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -22040,14 +23066,14 @@ dynamic_type: at::Tensor is_nullable: true name: prepend - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: append - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -22071,13 +23097,13 @@ dynamic_type: at::Tensor is_nullable: true name: prepend - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: append - type: const c10::optional & + type: const ::std::optional & method_of: - Type - Tensor @@ -22131,14 +23157,14 @@ dynamic_type: at::Tensor is_nullable: true name: prepend - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: append - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, const c10::optional &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, const ::std::optional &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -22162,13 +23188,13 @@ dynamic_type: at::Tensor is_nullable: true name: prepend - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: append - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
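The prepend/append arguments in the at::diff hunks above are nullable Tensors passed by const reference; the '{}' default in the YAML is the empty optional. A usage sketch, assuming the stock at::diff signature:

    #include <ATen/ATen.h>

    at::Tensor diff_demo() {
      at::Tensor v = at::arange(5);          // 0 1 2 3 4 (int64)
      // at::diff(v) with both optionals empty gives 1 1 1 1; prepending a
      // zero (built with v.options() so dtypes match) gives 0 1 1 1 1:
      return at::diff(v, /*n=*/1, /*dim=*/-1,
                      /*prepend=*/at::zeros({1}, v.options()));
    }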
dynamic_type: at::Tensor @@ -22205,19 +23231,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: spacing - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -22225,7 +23251,7 @@ kwarg_only: true name: edge_order type: int64_t - schema_order_cpp_signature: ::std::vector (const at::Tensor &, const c10::optional &, c10::optional, int64_t) + schema_order_cpp_signature: ::std::vector (const at::Tensor &, const ::std::optional &, ::std::optional, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -22233,19 +23259,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: spacing - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -22422,12 +23448,12 @@ name: spacing type: at::ArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -22435,7 +23461,7 @@ kwarg_only: true name: edge_order type: int64_t - schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::ArrayRef, c10::optional, int64_t) + schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::ArrayRef, ::std::optional, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -22449,12 +23475,12 @@ name: spacing type: at::ArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -22570,12 +23596,12 @@ name: spacing type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -22583,7 +23609,7 @@ kwarg_only: true name: edge_order type: int64_t - schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::TensorList, c10::optional, int64_t) + schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::TensorList, ::std::optional, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -22597,12 +23623,12 @@ name: spacing type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -22871,8 +23897,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor 
(const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -22889,7 +23915,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -22929,8 +23955,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -22947,7 +23973,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -22993,8 +24019,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -23011,7 +24037,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -23148,8 +24174,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -23166,7 +24192,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -23206,8 +24232,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -23224,7 +24250,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -23504,8 +24530,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -23522,7 +24548,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -23562,8 +24588,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a! 
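rounding_mode in these div hunks is a nullable string argument; only its optional wrapper changed spelling, so eager usage is untouched. A sketch, assuming the standard at::div overloads:

    #include <ATen/ATen.h>
    #include <optional>

    at::Tensor div_modes_demo() {
      at::Tensor x = at::full({1}, 7.0);
      at::Tensor y = at::full({1}, 2.0);
      at::Tensor t = at::div(x, y, "trunc");         // truncated: 3.0
      at::Tensor f = at::div(x, y, "floor");         // floored:   3.0
      return at::div(x, y, ::std::nullopt);          // true division: 3.5
    }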
dynamic_type: at::Tensor @@ -23580,7 +24606,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -23626,8 +24652,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -23644,7 +24670,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -23690,8 +24716,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -23708,7 +24734,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -23748,8 +24774,8 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -23766,7 +24792,7 @@ is_nullable: true kwarg_only: true name: rounding_mode - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -24252,7 +25278,7 @@ name: tensors type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true kwarg_only: true @@ -24271,7 +25297,7 @@ name: tensors type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true kwarg_only: true @@ -24719,7 +25745,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -24732,7 +25758,7 @@ is_nullable: false name: padding_idx type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool, int64_t) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const ::std::optional &, bool, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -24772,7 +25798,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -24997,14 +26023,14 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool is_nullable: false name: include_last_offset type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool) + 
schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -25044,7 +26070,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -25117,7 +26143,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -25127,8 +26153,8 @@ dynamic_type: int64_t is_nullable: true name: padding_idx - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const ::std::optional &, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -25164,7 +26190,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -25174,7 +26200,7 @@ dynamic_type: int64_t is_nullable: true name: padding_idx - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -25245,7 +26271,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -25258,7 +26284,7 @@ is_nullable: false name: padding_idx type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool, int64_t) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const ::std::optional &, bool, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -25298,7 +26324,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -25397,14 +26423,14 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: -1 dynamic_type: int64_t is_nullable: false name: padding_idx type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, bool, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, bool, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -25460,7 +26486,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: -1 dynamic_type: int64_t @@ -25478,11 +26504,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true 
with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: _embedding_bag_sparse_backward operator_name: _embedding_bag_sparse_backward overload_name: '' @@ -25534,14 +26560,14 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: -1 dynamic_type: int64_t is_nullable: false name: padding_idx type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -25587,7 +26613,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: -1 dynamic_type: int64_t @@ -25661,14 +26687,14 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: -1 dynamic_type: int64_t is_nullable: false name: padding_idx type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -25714,7 +26740,7 @@ dynamic_type: at::Tensor is_nullable: true name: per_sample_weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: -1 dynamic_type: int64_t @@ -25851,7 +26877,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -25860,13 +26886,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -25878,42 +26904,42 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: 
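per_sample_weights throughout the _embedding_bag hunks is likewise a nullable Tensor taken by const reference, and the abstract / has_math_kernel flips just above record a dispatch-metadata change on the backward entry, not an API change. A sketch of the public entry point, assuming the padding_idx-free at::embedding_bag overload:

    #include <ATen/ATen.h>
    #include <tuple>

    at::Tensor pooled_bags() {
      at::Tensor weight  = at::randn({10, 4});
      at::Tensor indices = at::arange(6);        // int64 row ids 0..5
      at::Tensor offsets = at::arange(0, 6, 3);  // two bags: [0,3) and [3,6)
      // '{}' for per_sample_weights is the empty optional from the YAML default.
      auto out = at::embedding_bag(weight, indices, offsets,
                                   /*scale_grad_by_freq=*/false, /*mode=*/0,
                                   /*sparse=*/false, /*per_sample_weights=*/{},
                                   /*include_last_offset=*/false);
      return std::get<0>(out);                   // (2, 4) sum-pooled rows
    }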
::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -25950,13 +26976,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -25964,40 +26990,120 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: empty_permuted + operator_name: empty_permuted + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: physical_layout + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: physical_layout + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional method_of: - Type - namespace @@ -26038,7 +27144,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -26051,33 +27157,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -26123,7 +27229,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -26141,33 +27247,33 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout 
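empty_permuted is the one genuinely new operator in this stretch: it allocates an uninitialized tensor whose physical memory layout is a given permutation of the logical sizes, useful for producing e.g. channels-last buffers without a transpose. A sketch of the factory the schema describes:

    #include <ATen/ATen.h>

    // Logical shape (2, 3, 4); physical_layout {2, 0, 1} lays memory out as
    // if the tensor had been allocated with shape (4, 2, 3) and permuted
    // back, so the logical view is the same but the strides differ.
    at::Tensor t = at::empty_permuted({2, 3, 4}, {2, 0, 1});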
is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -26213,7 +27319,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -26231,33 +27337,33 @@ name: fill_value type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -26298,7 +27404,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -26311,33 +27417,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -26378,7 +27484,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -26391,33 +27497,33 @@ name: 
size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -26439,7 +27545,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor + schema_string: aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -26468,13 +27574,13 @@ name: zero_point type: int64_t - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, double, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, double, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -26482,33 +27588,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: double @@ -26524,12 +27630,12 @@ name: zero_point type: int64_t - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -26551,7 +27657,7 @@ overload_name: '' manual_kernel_registration: false category_override: factory - schema_string: aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? 
layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor + schema_string: aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -26584,13 +27690,13 @@ name: options type: at::TensorOptions - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Tensor &, const at::Tensor &, int64_t, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Tensor &, const at::Tensor &, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -26616,40 +27722,40 @@ name: axis type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -26684,13 +27790,13 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -26703,12 +27809,12 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -26730,7 +27836,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_resize_output_(Tensor(a!) self, int[] size, Device device) -> Tensor(a!) + schema_string: aten::_resize_output_(Tensor(a!) self, SymInt[] size, Device device) -> Tensor(a!) arguments: - annotation: a! 
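Two cosmetic changes recur through the quantized-empty and resize hunks here: size arguments move from int[] to SymInt[] in the schema strings (relevant only to symbolic tracing; the eager C++ surface still takes plain integers), and the Contiguous default gains its full c10:: qualification. A sketch of the unchanged eager usage:

    #include <ATen/ATen.h>

    at::Tensor channels_last_demo() {
      // memory_format is the trailing nullable enum on the empty factories;
      // c10::MemoryFormat::Contiguous is the same default value as before,
      // merely written fully qualified in the regenerated YAML.
      return at::empty({1, 3, 8, 8}, at::TensorOptions(),
                       c10::MemoryFormat::ChannelsLast);
    }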
dynamic_type: at::Tensor @@ -26805,13 +27911,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -26824,40 +27930,40 @@ name: qtensor type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -26894,13 +28000,13 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -26908,12 +28014,12 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -26957,13 +28063,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -26971,40 +28077,40 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -27045,7 +28151,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -27058,33 +28164,33 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -27815,7 +28921,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -27829,7 +28935,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -27837,33 +28943,33 @@ name: n type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -27885,7 +28991,7 @@ overload_name: m manual_kernel_registration: false category_override: '' - schema_string: aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -27904,7 +29010,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -27917,33 +29023,33 @@ name: m type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -27965,7 +29071,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -28014,7 +29120,7 @@ overload_name: m_out manual_kernel_registration: false category_override: '' - schema_string: aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
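The eye family shows the SymInt migration on scalar sizes: aten::eye(SymInt n, ...) replaces int in the schema string, while the eager C++ API still takes int64_t. A sketch:

    #include <ATen/ATen.h>

    at::Tensor eye_demo() {
      at::Tensor sq   = at::eye(3);     // aten::eye   -> 3x3 identity
      at::Tensor rect = at::eye(2, 5);  // aten::eye.m -> 2x5, unit diagonal
      return sq.sum() + rect.sum();     // 3 + 2 = 5, just to use both
    }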
@@ -28321,7 +29427,7 @@ overload_name: int manual_kernel_registration: false category_override: '' - schema_string: aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a) + schema_string: aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a) arguments: - annotation: a dynamic_type: at::Tensor @@ -28377,7 +29483,7 @@ overload_name: Dimname manual_kernel_registration: false category_override: '' - schema_string: aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a) + schema_string: aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a) arguments: - annotation: a dynamic_type: at::Tensor @@ -28932,11 +30038,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: floor_divide_ operator_name: floor_divide_ overload_name: Scalar @@ -28977,11 +30083,11 @@ type: at::Tensor & inplace: true is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: frac operator_name: frac overload_name: '' @@ -29125,7 +30231,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -29133,7 +30239,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -29150,35 +30256,35 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -29219,7 +30325,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -29232,33 +30338,33 @@ name: fill_value type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - 
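The abstract / has_math_kernel flips on floor_divide and floor_divide_ above record a dispatch change only: abstract: true roughly means the op no longer relies on a composite (math) fallback and backends register dedicated kernels instead. Caller-visible behavior should be unchanged (sketch):

    #include <ATen/ATen.h>

    at::Tensor floor_div_demo() {
      at::Tensor a = at::full({1}, 7.0);
      at::Tensor b = at::full({1}, 2.0);
      return at::floor_divide(a, b);  // 3.0 -- same result, new dispatch path
    }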
default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -29359,13 +30465,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -29378,40 +30484,40 @@ name: fill_value type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -29441,17 +30547,17 @@ name: filename type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: shared - type: c10::optional + type: ::std::optional - annotation: null default: 0 dynamic_type: int64_t is_nullable: true name: size - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -29459,7 +30565,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (c10::string_view, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (c10::string_view, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: c10::string_view @@ -29467,45 +30573,45 @@ name: filename type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: shared - type: c10::optional + type: ::std::optional - annotation: null default: 0 dynamic_type: int64_t is_nullable: true name: size - type: c10::optional + type: ::std::optional - annotation: null - default: 
c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30427,7 +31533,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -30435,33 +31541,33 @@ name: window_length type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30502,7 +31608,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, bool, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -30515,33 +31621,33 @@ name: periodic type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30577,7 +31683,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null 
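from_file above carries two nullable scalars (shared, size) besides the usual factory options. A usage sketch; the filename and element count here are made up for illustration:

    #include <ATen/ATen.h>

    at::Tensor load_raw_bytes() {
      // Hypothetical path and size: maps 1024 uint8 elements from a raw file.
      return at::from_file("weights.bin", /*shared=*/true, /*size=*/1024,
                           at::TensorOptions().dtype(at::kByte));
    }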
dynamic_type: int64_t @@ -30585,33 +31691,33 @@ name: window_length type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30652,7 +31758,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, bool, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -30665,33 +31771,33 @@ name: periodic type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30737,7 +31843,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, bool, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, bool, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -30755,33 +31861,33 @@ name: alpha type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30832,7 +31938,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, bool, double, double, c10::optional, c10::optional, c10::optional, c10::optional) + 
schema_order_cpp_signature: at::Tensor (int64_t, bool, double, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -30855,33 +31961,33 @@ name: beta type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30917,7 +32023,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -30925,33 +32031,33 @@ name: window_length type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -30992,7 +32098,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, bool, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -31005,33 +32111,33 @@ name: periodic type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -31077,7 +32183,7 @@ kwarg_only: true name: options type: at::TensorOptions 
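Everything above and below this point follows the same mechanical substitution: the regenerated declarations spell optional types as ::std::optional and their empty default as ::std::nullopt instead of c10::optional / c10::nullopt. In recent LibTorch (2.4+) c10::optional is an alias of std::optional, so both spellings name the same type and existing callers keep compiling. A minimal caller-side sketch (my example, not part of this diff), using at::full_like, whose regenerated schema appears in this hunk:

// Assumes LibTorch >= 2.4, where c10::optional<T> is an alias of std::optional<T>.
#include <ATen/ATen.h>
#include <optional>

at::Tensor ones_like_f32(const at::Tensor& t) {
  // The trailing optionals default to ::std::nullopt in the new schema;
  // std::nullopt and the legacy c10::nullopt are interchangeable here.
  return at::full_like(t, /*fill_value=*/1.0,
                       /*dtype=*/std::make_optional(at::kFloat),
                       /*layout=*/std::nullopt,
                       /*device=*/std::nullopt,
                       /*pin_memory=*/std::nullopt,
                       /*memory_format=*/std::nullopt);
}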
- schema_order_cpp_signature: at::Tensor (int64_t, bool, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, bool, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -31095,33 +32201,33 @@ name: beta type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -31229,13 +32335,13 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1.0e-05 dynamic_type: double @@ -31248,7 +32354,7 @@ is_nullable: false name: cudnn_enabled type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const c10::optional &, const c10::optional &, double, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const ::std::optional &, const ::std::optional &, double, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -31265,13 +32371,13 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1.0e-05 dynamic_type: double @@ -31316,12 +32422,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -31347,7 +32453,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -31358,12 +32464,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -31442,7 +32548,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null 
dynamic_type: int64_t is_nullable: false @@ -31468,7 +32574,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, ::std::array) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, ::std::array) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -31494,7 +32600,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -31691,7 +32797,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor + schema_string: aten::_fft_c2r(Tensor self, int[] dim, int normalization, SymInt last_dim_size) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -31756,7 +32862,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, SymInt last_dim_size, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -32061,20 +33167,20 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_cufft_get_plan_cache_size(int device_index) -> int + schema_string: aten::_cufft_get_plan_cache_size(DeviceIndex device_index) -> int arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t - schema_order_cpp_signature: int64_t (int64_t) + type: at::DeviceIndex + schema_order_cpp_signature: int64_t (at::DeviceIndex) schema_order_arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t + type: at::DeviceIndex method_of: - Type - namespace @@ -32096,20 +33202,20 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_cufft_get_plan_cache_max_size(int device_index) -> int + schema_string: aten::_cufft_get_plan_cache_max_size(DeviceIndex device_index) -> int arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t - schema_order_cpp_signature: int64_t (int64_t) + type: at::DeviceIndex + schema_order_cpp_signature: int64_t (at::DeviceIndex) schema_order_arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t + type: at::DeviceIndex method_of: - Type - namespace @@ -32131,25 +33237,25 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> () + schema_string: aten::_cufft_set_plan_cache_max_size(DeviceIndex device_index, int max_size) -> () arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t + type: at::DeviceIndex - annotation: null dynamic_type: int64_t is_nullable: false name: 
max_size type: int64_t - schema_order_cpp_signature: void (int64_t, int64_t) + schema_order_cpp_signature: void (at::DeviceIndex, int64_t) schema_order_arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t + type: at::DeviceIndex - annotation: null dynamic_type: int64_t is_nullable: false @@ -32173,20 +33279,20 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_cufft_clear_plan_cache(int device_index) -> () + schema_string: aten::_cufft_clear_plan_cache(DeviceIndex device_index) -> () arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t - schema_order_cpp_signature: void (int64_t) + type: at::DeviceIndex + schema_order_cpp_signature: void (at::DeviceIndex) schema_order_arguments: - annotation: null - dynamic_type: int64_t + dynamic_type: at::DeviceIndex is_nullable: false name: device_index - type: int64_t + type: at::DeviceIndex method_of: - Type - namespace @@ -32213,11 +33319,11 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List> &) + type: const c10::List<::std::optional> & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List<::std::optional> &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -32225,10 +33331,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & method_of: - Type - Tensor @@ -32266,11 +33372,11 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::List> &, at::Tensor &) + type: const c10::List<::std::optional> & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::List<::std::optional> &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -32278,10 +33384,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - allocate: true annotation: a! 
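Two scalar-typing changes ride along with the optional migration in this region: aten::_fft_c2r's last_dim_size is now declared SymInt rather than int, so it can stay symbolic under tracing, and the aten::_cufft_* plan-cache entry points take DeviceIndex instead of a plain int. A sketch of the latter (hypothetical caller; c10::DeviceIndex is a narrow integer type, int8_t in recent c10):

#include <ATen/ATen.h>

// Cap the cuFFT plan cache for one device, passing the index as the
// DeviceIndex type the regenerated schema now expects.
void cap_cufft_cache(c10::DeviceIndex dev, int64_t max_plans) {
  if (at::_cufft_get_plan_cache_size(dev) > max_plans) {
    at::_cufft_set_plan_cache_max_size(dev, max_plans);
  }
}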
dynamic_type: at::Tensor @@ -32305,6 +33411,181 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _unsafe_index + operator_name: _unsafe_index + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::_unsafe_index.Tensor(Tensor self, Tensor?[] indices) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List<::std::optional> &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _unsafe_masked_index + operator_name: _unsafe_masked_index + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_unsafe_masked_index(Tensor self, Tensor mask, Tensor?[] indices, Scalar fill) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::List<::std::optional> &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _unsafe_masked_index_put_accumulate + operator_name: _unsafe_masked_index_put_accumulate + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_unsafe_masked_index_put_accumulate(Tensor self, Tensor mask, Tensor?[] indices, Tensor values) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: 
const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: index_copy_out operator_name: index_copy overload_name: out @@ -32659,10 +33940,10 @@ name: self type: at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -32674,7 +33955,7 @@ is_nullable: false name: accumulate type: bool - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::List> &, const at::Tensor &, bool) + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &, bool) schema_order_arguments: - annotation: a! 
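The block above also introduces the aten::_unsafe_index / _unsafe_masked_index / _unsafe_masked_index_put_accumulate family: indexing variants that skip the usual bounds validation, intended for compiler-generated code that has already proven the indices in range. A sketch of the Tensor-list calling convention (my example; the Tensor?[] schema type surfaces in C++ as c10::List<::std::optional<at::Tensor>>):

#include <ATen/ATen.h>

// Equivalent of indexing self with idx along dim 0, but through the
// unchecked entry point; only safe when idx is known to be in bounds.
at::Tensor gather_rows_unchecked(const at::Tensor& self, const at::Tensor& idx) {
  c10::List<::std::optional<at::Tensor>> indices;
  indices.push_back(::std::optional<at::Tensor>(idx));
  return at::_unsafe_index(self, indices);
}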
dynamic_type: at::Tensor @@ -32682,10 +33963,10 @@ name: self type: at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -32727,10 +34008,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -32742,7 +34023,7 @@ is_nullable: false name: accumulate type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List> &, const at::Tensor &, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -32750,10 +34031,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -32782,6 +34063,73 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _unsafe_index_put + operator_name: _unsafe_index_put + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<::std::optional> & + is_nullable: true + name: indices + type: const c10::List<::std::optional> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _index_put_impl_ operator_name: _index_put_impl_ overload_name: '' @@ -32795,10 +34143,10 @@ name: self type: at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ 
-32816,7 +34164,7 @@ is_nullable: false name: unsafe type: bool - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::List> &, const at::Tensor &, bool, bool) + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &, bool, bool) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -32824,10 +34172,10 @@ name: self type: at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -32877,22 +34225,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -32913,7 +34261,7 @@ is_nullable: false name: cudnn_enabled type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -32924,22 +34272,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -34536,13 +35884,13 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1.0e-05 dynamic_type: double @@ -34555,7 +35903,7 @@ is_nullable: false name: cudnn_enable type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, const c10::optional &, const c10::optional &, double, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, const ::std::optional &, const ::std::optional &, double, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -34572,13 +35920,13 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1.0e-05 
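As elsewhere in this file, nullable tensor arguments (Tensor? in the schema strings) are retyped from const c10::optional & to const ::std::optional & — again an alias-level rename. On the caller side an empty braced initializer still selects the declared '{}' default, for example for aten::layer_norm just above (sketch, not from the diff):

#include <ATen/ATen.h>

// Layer norm over the last dimension with no affine weight/bias: the two
// optional<Tensor> arguments are passed as empty optionals.
at::Tensor plain_layer_norm(const at::Tensor& x) {
  return at::layer_norm(x, /*normalized_shape=*/{x.size(-1)},
                        /*weight=*/{}, /*bias=*/{},
                        /*eps=*/1e-5, /*cudnn_enable=*/true);
}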
dynamic_type: double @@ -34628,18 +35976,18 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, const c10::optional &, const c10::optional &, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, const ::std::optional &, const ::std::optional &, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -34655,12 +36003,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -34724,18 +36072,18 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: ::std::array is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, ::std::array) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, ::std::array) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -34766,12 +36114,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: ::std::array is_nullable: false @@ -34799,6 +36147,75 @@ with_gil: false deprecated: false has_math_kernel: false +- name: rms_norm + operator_name: rms_norm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::rms_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, float? 
eps=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: normalized_shape + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, const ::std::optional &, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: normalized_shape + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true - name: nan_to_num operator_name: nan_to_num overload_name: '' @@ -34812,24 +36229,24 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: nan - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: posinf - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: neginf - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -34837,23 +36254,23 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: nan - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: posinf - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: neginf - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -34884,24 +36301,24 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: nan - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: posinf - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: neginf - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, c10::optional, 
c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -34909,23 +36326,23 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: nan - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: posinf - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: neginf - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -34963,24 +36380,24 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: nan - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: posinf - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: neginf - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -34988,23 +36405,23 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: nan - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: posinf - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: neginf - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -35050,8 +36467,8 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -35068,7 +36485,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -35185,8 +36602,8 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -35203,7 +36620,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
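New in this hunk is aten::rms_norm, declared with has_math_kernel: true (it has a composite implementation) and an optional eps defaulting to ::std::nullopt. Assuming a LibTorch build recent enough to ship the operator, usage looks like this (sketch):

#include <ATen/ATen.h>

// RMSNorm over the last dimension with a learned scale; eps is a
// ::std::optional<double>, so a plain double converts implicitly.
at::Tensor rmsnorm_last_dim(const at::Tensor& x, const at::Tensor& gamma) {
  return at::rms_norm(x, /*normalized_shape=*/{x.size(-1)},
                      /*weight=*/gamma, /*eps=*/1e-6);
}

The nan_to_num declarations that follow show the same pattern for optional scalars: double? arguments become ::std::optional<double> with ::std::nullopt defaults.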
dynamic_type: at::Tensor @@ -35249,8 +36666,8 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -35267,7 +36684,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -35478,6 +36895,759 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _cslt_compress + operator_name: _cslt_compress + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cslt_compress(Tensor input) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cslt_sparse_mm + operator_name: _cslt_sparse_mm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cslt_sparse_mm(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False, int alg_id=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: compressed_A + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense_B + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: alpha + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: out_dtype + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose_result + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: alg_id + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, ::std::optional, bool, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: compressed_A + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense_B + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: alpha + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: out_dtype + type: ::std::optional + - annotation: null + 
default: false + dynamic_type: bool + is_nullable: false + name: transpose_result + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: alg_id + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cslt_sparse_mm_search + operator_name: _cslt_sparse_mm_search + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cslt_sparse_mm_search(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: compressed_A + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense_B + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: alpha + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: out_dtype + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose_result + type: bool + schema_order_cpp_signature: int64_t (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, ::std::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: compressed_A + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense_B + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: alpha + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: out_dtype + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose_result + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_semi_structured_tile + operator_name: _sparse_semi_structured_tile + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_semi_structured_tile(Tensor input, str algorithm="", bool use_cutlass=True) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + default: '""' + dynamic_type: c10::string_view + is_nullable: false + name: algorithm + type: c10::string_view + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: use_cutlass + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, 
c10::string_view, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + default: '""' + dynamic_type: c10::string_view + is_nullable: false + name: algorithm + type: c10::string_view + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: use_cutlass + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + - dynamic_type: at::Tensor + name: result4 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_semi_structured_apply + operator_name: _sparse_semi_structured_apply + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_semi_structured_apply(Tensor input, Tensor thread_masks) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: thread_masks + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: thread_masks + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_semi_structured_apply_dense + operator_name: _sparse_semi_structured_apply_dense + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_semi_structured_apply_dense(Tensor input, Tensor thread_masks) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: thread_masks + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: thread_masks + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_semi_structured_linear + operator_name: _sparse_semi_structured_linear + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_semi_structured_linear(Tensor input, Tensor 
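This hunk adds the cuSPARSELt-backed 2:4 sparsity operators (_cslt_compress, _cslt_sparse_mm, _cslt_sparse_mm_search) and the semi-structured tile/apply helpers, all declared abstract (backend kernels, no math fallback). A hedged sketch of the intended flow, assuming a CUDA build with cuSPARSELt and an operand that already satisfies the 2:4 sparsity pattern:

#include <ATen/ATen.h>

// Compress the structured-sparse operand once, then reuse it across
// matmuls; bias/alpha/out_dtype/transpose_result/alg_id keep their defaults.
at::Tensor sparse24_mm(const at::Tensor& A_24, const at::Tensor& B) {
  at::Tensor A_compressed = at::_cslt_compress(A_24);
  return at::_cslt_sparse_mm(A_compressed, B);
}

Per its schema above, _cslt_sparse_mm_search returns an int alg_id for a given problem, which can then be passed back into _cslt_sparse_mm.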
weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: meta + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: bias + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: activation + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: out_dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: meta + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: bias + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: activation + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: out_dtype + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_semi_structured_mm + operator_name: _sparse_semi_structured_mm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_semi_structured_mm(Tensor mat1, Tensor mat1_meta, Tensor mat2, *, ScalarType? 
out_dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1_meta + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: out_dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1_meta + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: out_dtype + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_semi_structured_addmm + operator_name: _sparse_semi_structured_addmm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_semi_structured_addmm(Tensor input, Tensor mat1, Tensor mat1_meta, Tensor mat2, *, Scalar alpha=1, Scalar beta=1, ScalarType? 
out_dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1_meta + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: out_dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1_meta + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: out_dtype + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _mixed_dtypes_linear + operator_name: _mixed_dtypes_linear + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_mixed_dtypes_linear(Tensor input, Tensor weight, Tensor scale, *, Tensor? bias=None, str? 
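aten::_sparse_semi_structured_mm and _addmm take the sparse operand as a (mat1, mat1_meta) pair — values plus sparsity metadata — with a kwarg-only out_dtype for widening the output. Sketch with hypothetical inputs:

#include <ATen/ATen.h>

// Matmul with a 2:4 semi-structured mat1; out_dtype is an
// ::std::optional<at::ScalarType>, so at::kFloat converts implicitly.
at::Tensor ss_mm_f32(const at::Tensor& mat1, const at::Tensor& mat1_meta,
                     const at::Tensor& mat2) {
  return at::_sparse_semi_structured_mm(mat1, mat1_meta, mat2,
                                        /*out_dtype=*/at::kFloat);
}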
activation=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: bias + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: activation + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: bias + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: activation + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: fbgemm_linear_int8_weight_fp32_activation operator_name: fbgemm_linear_int8_weight_fp32_activation overload_name: '' @@ -35747,6 +37917,166 @@ with_gil: false deprecated: false has_math_kernel: true +- name: _wrapped_linear_prepack + operator_name: _wrapped_linear_prepack + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_wrapped_linear_prepack(Tensor weight, Tensor weight_scale, Tensor weight_zero_point, Tensor bias) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight_scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight_zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight_scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight_zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: 
false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _wrapped_quantized_linear_prepacked + operator_name: _wrapped_quantized_linear_prepacked + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_wrapped_quantized_linear_prepacked(Tensor input, Tensor input_scale, Tensor input_zero_point, Tensor packed_weight, Tensor output_scale, Tensor output_zero_point, int out_channel) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: packed_weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output_scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output_zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: out_channel + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: packed_weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output_scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output_zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: out_channel + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true - name: fbgemm_linear_fp16_weight_fp32_activation operator_name: fbgemm_linear_fp16_weight_fp32_activation overload_name: '' @@ -36127,7 +38457,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -36145,33 +38475,33 @@ name: steps type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true 
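
Note on the two `_wrapped_*` records above: this rendering drops template arguments from the optional types, so per the `dynamic_type` fields the optional bias in the preceding record is `::std::optional<at::Tensor>` and the optional activation is `::std::optional<c10::string_view>`. Below is a minimal sketch of how the pair composes (prepack the weight once, then run the prepacked quantized linear per input), assuming an FBGEMM-enabled CPU build; the dtypes, shapes, and quantization parameters are assumptions, not taken from the schemas.

#include <ATen/ATen.h>

// Hedged sketch: all arguments are plain Tensors plus an int64_t out_channel,
// matching the schema_order_cpp_signature fields above; dtypes are guesses.
at::Tensor wrapped_quantized_linear_sketch(const at::Tensor& input) {
  auto weight  = at::randint(-128, 127, {4, 8}, at::kChar);  // int8 weight
  auto w_scale = at::scalar_tensor(0.05);
  auto w_zero  = at::scalar_tensor(0.0);
  auto bias    = at::zeros({4});
  auto packed  = at::_wrapped_linear_prepack(weight, w_scale, w_zero, bias);
  return at::_wrapped_quantized_linear_prepacked(
      input, /*input_scale=*/at::scalar_tensor(0.1),
      /*input_zero_point=*/at::scalar_tensor(0.0), packed,
      /*output_scale=*/at::scalar_tensor(0.2),
      /*output_zero_point=*/at::scalar_tensor(0.0), /*out_channel=*/4);
}
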
name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -36188,59 +38518,80 @@ with_gil: false deprecated: false has_math_kernel: false -- name: linspace_out +- name: linspace operator_name: linspace - overload_name: out + overload_name: Tensor_Tensor manual_kernel_registration: false - category_override: '' - schema_string: aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) + category_override: factory + schema_string: aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::Tensor is_nullable: false name: start - type: const at::Scalar & + type: const at::Tensor & - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::Tensor is_nullable: false name: end - type: const at::Scalar & + type: const at::Tensor & - annotation: null dynamic_type: int64_t is_nullable: false name: steps type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Scalar &, int64_t, at::Tensor &) + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::Tensor is_nullable: false name: start - type: const at::Scalar & + type: const at::Tensor & - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::Tensor is_nullable: false name: end - type: const at::Scalar & + type: const at::Tensor & - annotation: null dynamic_type: int64_t is_nullable: false name: steps type: int64_t - - allocate: true - annotation: a! 
- dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional method_of: - Type - namespace @@ -36248,37 +38599,91 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: out - type: at::Tensor & + name: result + type: at::Tensor inplace: false - is_factory_method: false + is_factory_method: true abstract: true device_guard: true with_gil: false deprecated: false has_math_kernel: false -- name: log - operator_name: log - overload_name: '' +- name: linspace + operator_name: linspace + overload_name: Tensor_Scalar manual_kernel_registration: false - category_override: '' - schema_string: aten::log(Tensor self) -> Tensor + category_override: factory + schema_string: aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self + name: start type: const at::Tensor & - schema_order_cpp_signature: at::Tensor (const at::Tensor &) + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self + name: start type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional method_of: - Type - - Tensor - namespace mode: native python_module: '' @@ -36287,54 +38692,108 @@ name: result type: at::Tensor inplace: false - is_factory_method: false + is_factory_method: true abstract: true device_guard: true with_gil: false deprecated: false has_math_kernel: false -- name: log_ - operator_name: log_ - overload_name: '' +- name: linspace + operator_name: linspace + 
overload_name: Scalar_Tensor manual_kernel_registration: false - category_override: '' - schema_string: aten::log_(Tensor(a!) self) -> Tensor(a!) + category_override: factory + schema_string: aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - - annotation: a! + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self - type: at::Tensor & - schema_order_cpp_signature: at::Tensor & (at::Tensor &) + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - - annotation: a! + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self - type: at::Tensor & + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional method_of: - Type - - Tensor - namespace mode: native python_module: '' returns: - dynamic_type: at::Tensor - name: self - type: at::Tensor & - inplace: true - is_factory_method: false + name: result + type: at::Tensor + inplace: false + is_factory_method: true abstract: true device_guard: true with_gil: false deprecated: false has_math_kernel: false -- name: log_out - operator_name: log +- name: linspace_out + operator_name: linspace overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -36344,28 +38803,376 @@ output: true type: at::Tensor & - annotation: null - dynamic_type: at::Tensor + dynamic_type: const at::Scalar & is_nullable: false - name: self - type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) - schema_order_arguments: + name: start + type: const at::Scalar & - annotation: null - dynamic_type: at::Tensor + dynamic_type: const at::Scalar & is_nullable: false - name: self - type: const at::Tensor & - - allocate: true - annotation: a! 
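
The three new `linspace` factory overloads above accept 0-dim tensor endpoints, so `start`/`end` can live on a device or carry autograd history, and are otherwise drop-in for the existing Scalar version. The kwarg types render here without template arguments; per the `dynamic_type` fields they are `::std::optional<at::ScalarType>`, `::std::optional<at::Layout>`, `::std::optional<at::Device>`, and `::std::optional<bool>`. A minimal call sketch, assuming a LibTorch build that ships these overloads:

#include <ATen/ATen.h>

void linspace_overloads_sketch() {
  auto start = at::scalar_tensor(0.0);
  auto end   = at::scalar_tensor(1.0);
  auto a = at::linspace(start, end, /*steps=*/5);  // linspace.Tensor_Tensor
  auto b = at::linspace(start, 1.0, /*steps=*/5);  // linspace.Tensor_Scalar
  auto c = at::linspace(0.0, end, /*steps=*/5);    // linspace.Scalar_Tensor
}
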
- dynamic_type: at::Tensor + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native + name: steps + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Scalar &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linspace_out + operator_name: linspace + overload_name: Tensor_Tensor_out + manual_kernel_registration: false + category_override: factory + schema_string: aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linspace_out + operator_name: linspace + overload_name: Tensor_Scalar_out + manual_kernel_registration: false + category_override: factory + schema_string: aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linspace_out + operator_name: linspace + overload_name: Scalar_Tensor_out + manual_kernel_registration: false + category_override: factory + schema_string: aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log + operator_name: log + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::log(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_ + operator_name: log_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::log_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_out + operator_name: log + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
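
Each factory overload also gains an `out=` companion, reorganized above alongside the pre-existing Scalar/Scalar variant; in the generated C++ API the out tensor comes first. A sketch under the same build assumption:

#include <ATen/ATen.h>

void linspace_out_sketch() {
  auto start = at::scalar_tensor(0.0);
  auto end   = at::scalar_tensor(1.0);
  auto out   = at::empty({5});
  at::linspace_out(out, 0.0, 1.0, 5);    // pre-existing Scalar/Scalar out
  at::linspace_out(out, start, end, 5);  // new Tensor_Tensor_out
  at::linspace_out(out, start, 1.0, 5);  // new Tensor_Scalar_out
  at::linspace_out(out, 0.0, end, 5);    // new Scalar_Tensor_out
}
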
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native python_module: '' returns: - dynamic_type: at::Tensor @@ -37392,7 +40199,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, int64_t, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -37416,33 +40223,339 @@ name: base type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logspace + operator_name: logspace + overload_name: Tensor_Tensor + manual_kernel_registration: false + category_override: factory + schema_string: aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logspace + operator_name: logspace + overload_name: Tensor_Scalar + manual_kernel_registration: false + category_override: factory + schema_string: aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logspace + operator_name: logspace + overload_name: Scalar_Tensor + manual_kernel_registration: false + category_override: factory + schema_string: aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &, int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional method_of: - Type - namespace @@ -37540,6 +40653,249 @@ with_gil: false deprecated: false has_math_kernel: false +- name: logspace_out + operator_name: logspace + overload_name: Tensor_Tensor_out + manual_kernel_registration: false + category_override: factory + schema_string: aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - allocate: true + annotation: a! 
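
`logspace` gains the same three tensor-endpoint overloads, each with the extra `base` argument (default 10.0). Sketch, under the same build assumption:

#include <ATen/ATen.h>

void logspace_overloads_sketch() {
  auto s = at::scalar_tensor(0.0);
  auto e = at::scalar_tensor(3.0);
  auto a = at::logspace(s, e, /*steps=*/4);               // 1, 10, 100, 1000
  auto b = at::logspace(s, e, /*steps=*/4, /*base=*/2.0); // powers of two
  auto c = at::logspace(0.0, e, /*steps=*/4);             // Scalar_Tensor
}
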
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logspace_out + operator_name: logspace + overload_name: Tensor_Scalar_out + manual_kernel_registration: false + category_override: factory + schema_string: aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, int64_t, double, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logspace_out + operator_name: logspace + overload_name: Scalar_Tensor_out + manual_kernel_registration: false + category_override: factory + schema_string: aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, int64_t, double, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: log_softmax operator_name: log_softmax overload_name: int @@ -37558,12 +40914,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -37576,11 +40932,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -37623,12 +40979,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -37641,11 +40997,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
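
A recurring change throughout this hunk is the migration of defaults and types from `c10::optional`/`c10::nullopt` to `::std::optional`/`::std::nullopt` (the template arguments, e.g. `<at::ScalarType>`, are dropped by this rendering). Since recent PyTorch aliases `c10::optional` to `std::optional`, existing call sites keep compiling; a sketch using `log_softmax` from the adjacent hunk:

#include <ATen/ATen.h>
#include <optional>

void nullopt_migration_sketch() {
  auto x = at::randn({2, 3});
  // previously written as: at::log_softmax(x, /*dim=*/1, c10::nullopt);
  auto a = at::log_softmax(x, /*dim=*/1, /*dtype=*/::std::nullopt);
  auto b = at::log_softmax(x, /*dim=*/1, at::kDouble);  // implicit optional
}
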
dynamic_type: at::Tensor @@ -37687,13 +41043,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -37706,12 +41062,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -39126,12 +42482,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -39139,7 +42495,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -39147,12 +42503,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -39211,12 +42567,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -39224,7 +42580,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, bool, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, ::std::optional, bool, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -39232,12 +42588,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -40975,6 +44331,107 @@ with_gil: false deprecated: false has_math_kernel: false +- name: quantized_max_pool3d + operator_name: quantized_max_pool3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: 
at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: max_pool3d operator_name: max_pool3d overload_name: '' @@ -41089,13 +44546,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -41103,12 +44560,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -41126,6 +44583,69 @@ with_gil: false deprecated: false has_math_kernel: false +- name: mean_out + operator_name: mean + overload_name: dtype_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - allocate: true + annotation: a! 
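
`quantized_max_pool3d` above is the 3-D sibling of the existing quantized pooling ops, taking a quantized input plus the usual kernel/stride/padding/dilation arguments. A sketch; backend coverage for this kernel varies by build, and the quantization parameters below are illustrative:

#include <ATen/ATen.h>

void quantized_max_pool3d_sketch() {
  auto x  = at::randn({1, 2, 4, 8, 8});  // N, C, D, H, W
  auto qx = at::quantize_per_tensor(x, /*scale=*/0.1, /*zero_point=*/0,
                                    at::kQUInt8);
  auto qy = at::quantized_max_pool3d(qx, /*kernel_size=*/{2, 2, 2});
  auto y  = qy.dequantize();
}
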
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: mean operator_name: mean overload_name: dim @@ -41151,13 +44671,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -41177,12 +44697,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -41232,13 +44752,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -41258,12 +44778,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
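
`mean.dtype_out` above adds an out-variant for the full reduction with an optional accumulation dtype (`::std::optional<at::ScalarType>` once the template argument is restored). Sketch:

#include <ATen/ATen.h>

void mean_dtype_out_sketch() {
  auto x   = at::arange(6, at::kFloat).reshape({2, 3});
  auto out = at::empty({}, at::kDouble);
  at::mean_out(out, x, at::kDouble);  // new mean.dtype_out overload
}
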
dynamic_type: at::Tensor @@ -41312,13 +44832,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -41338,12 +44858,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -41393,13 +44913,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -41419,12 +44939,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -41461,7 +44981,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -41474,13 +44994,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -41488,7 +45008,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -41501,12 +45021,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -41544,7 +45064,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -41557,13 +45077,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null 
dynamic_type: at::Tensor @@ -41571,7 +45091,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -41584,12 +45104,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -42769,7 +46289,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor + schema_string: aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -42785,7 +46305,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -42806,7 +46326,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -42822,7 +46342,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -42864,7 +46384,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + schema_string: aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -42975,7 +46495,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor + schema_string: aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -42991,7 +46511,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -43012,7 +46532,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -43028,7 +46548,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -43320,17 +46840,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -43381,7 +46901,7 @@ is_nullable: false name: workspace type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, int64_t, int64_t, int64_t, bool, bool, bool, at::IntArrayRef, bool, const at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, int64_t, int64_t, int64_t, bool, bool, bool, at::IntArrayRef, bool, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -43437,17 +46957,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -43553,17 +47073,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -43579,7 +47099,7 @@ is_nullable: false name: epsilon type: double - schema_order_cpp_signature: 
::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -43595,17 +47115,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -43669,28 +47189,28 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false name: epsilon type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -43711,22 +47231,22 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -43759,7 +47279,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + schema_string: aten::miopen_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -43775,7 +47295,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -43806,7 +47326,7 @@ is_nullable: false name: deterministic type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -43822,7 +47342,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -43874,7 +47394,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + schema_string: aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -43890,7 +47410,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -43926,7 +47446,7 @@ is_nullable: false name: deterministic type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -43942,7 +47462,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -43999,7 +47519,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + schema_string: aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -44015,7 +47535,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -44046,7 +47566,7 @@ is_nullable: false name: deterministic type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -44062,7 +47582,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -44114,7 +47634,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + schema_string: aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -44130,7 +47650,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -44151,7 +47671,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -44167,7 +47687,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -44209,7 +47729,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + schema_string: aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -44230,12 +47750,12 @@ dynamic_type: const at::Scalar & is_nullable: true name: alpha - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -44256,7 +47776,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -44277,12 +47797,12 @@ dynamic_type: const at::Scalar & is_nullable: true name: alpha - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -44350,7 +47870,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -44395,8 +47915,8 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const ::std::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -44422,7 +47942,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -44467,7 +47987,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -44532,7 +48052,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -44542,17 +48062,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -44597,7 +48117,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ 
-44608,7 +48128,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, const at::Tensor &, ::std::array) + schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &, const at::Tensor &, ::std::array) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -44639,7 +48159,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -44649,17 +48169,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -44704,7 +48224,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -44845,6 +48365,275 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _int_mm + operator_name: _int_mm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_int_mm(Tensor self, Tensor mat2) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _int_mm_out + operator_name: _int_mm + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_int_mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
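
The `_int_mm` block above is one of several operators that are new in this regeneration of the declarations file. Its schema, `aten::_int_mm(Tensor self, Tensor mat2) -> Tensor`, says nothing about dtypes; upstream PyTorch restricts it to int8 inputs with an int32 result, and early versions were CUDA-only, so the device and shapes in this minimal C++ sketch are assumptions for illustration:

#include <ATen/ATen.h>

int main() {
  // int8 operands; _int_mm accumulates into an int32 result (upstream behavior).
  auto opts = at::TensorOptions().device(at::kCUDA).dtype(at::kChar);
  at::Tensor a = at::randint(-128, 128, {32, 64}, opts);  // [m, k]
  at::Tensor b = at::randint(-128, 128, {64, 32}, opts);  // [k, n]
  at::Tensor c = at::_int_mm(a, b);                       // [m, n], at::kInt
  return c.scalar_type() == at::kInt ? 0 : 1;
}

The `_convert_weight_to_int4pack`, `_weight_int4pack_mm`, and `_weight_int8pack_mm` declarations that follow extend the same idea to weight-only-quantized matmul, pairing a prepacked weight tensor with per-group scales (`qScaleAndZeros` / `scales` in the schemas below).
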
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _convert_weight_to_int4pack + operator_name: _convert_weight_to_int4pack + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_convert_weight_to_int4pack(Tensor self, int innerKTiles) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: innerKTiles + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: innerKTiles + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _weight_int4pack_mm + operator_name: _weight_int4pack_mm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_weight_int4pack_mm(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: qGroupSize + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: qScaleAndZeros + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: qGroupSize + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: qScaleAndZeros + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + 
name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _weight_int8pack_mm + operator_name: _weight_int8pack_mm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_weight_int8pack_mm(Tensor self, Tensor mat2, Tensor scales) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scales + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scales + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _sparse_mm operator_name: _sparse_mm overload_name: '' @@ -46251,7 +50040,7 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: false with_gil: false deprecated: false @@ -46338,22 +50127,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -46369,7 +50158,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -46380,22 +50169,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -46470,22 +50259,22 @@ dynamic_type: at::Tensor is_nullable: true name: 
weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -46501,7 +50290,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -46512,22 +50301,22 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -46602,12 +50391,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: a! dynamic_type: at::Tensor is_nullable: false @@ -46633,7 +50422,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &, at::Tensor &, bool, double, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &, at::Tensor &, bool, double, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -46644,12 +50433,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: a! dynamic_type: at::Tensor is_nullable: false @@ -46697,6 +50486,107 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _native_batch_norm_legit_no_training + operator_name: _native_batch_norm_legit_no_training + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? 
bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _native_batch_norm_legit_out operator_name: _native_batch_norm_legit overload_name: out @@ -46734,12 +50624,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: a! dynamic_type: at::Tensor is_nullable: false @@ -46765,7 +50655,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &, at::Tensor &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &, at::Tensor &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -46776,12 +50666,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: a! 
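
`_native_batch_norm_legit_no_training`, declared in full just above, is the inference-only variant of batch norm: `running_mean` and `running_var` are required plain `Tensor`s rather than `Tensor?`, and there is no `training` flag. A minimal sketch of a call through the generated C++ API, with shapes chosen purely for illustration:

#include <ATen/ATen.h>
#include <optional>

int main() {
  // NCHW input with C = 8; weight and bias are optional per the schema.
  at::Tensor x    = at::randn({4, 8, 16, 16});
  at::Tensor mean = at::zeros({8});
  at::Tensor var  = at::ones({8});
  auto [out, save_mean, save_invstd] =
      at::_native_batch_norm_legit_no_training(
          x, /*weight=*/::std::nullopt, /*bias=*/::std::nullopt,
          mean, var, /*momentum=*/0.1, /*eps=*/1e-5);
  return out.sizes() == x.sizes() ? 0 : 1;
}
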
dynamic_type: at::Tensor is_nullable: false @@ -46866,12 +50756,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -46887,7 +50777,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, bool, double, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, bool, double, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -46898,12 +50788,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -46978,12 +50868,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -46999,7 +50889,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47010,12 +50900,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -47138,12 +51028,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -47159,7 +51049,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, double) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47170,12 +51060,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -47230,12 +51120,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & 
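
Most of this regenerated file is the mechanical rename from `c10::optional`/`c10::nullopt` to `::std::optional`/`::std::nullopt`, which upstream PyTorch carried out once `c10::optional` became a plain alias of `std::optional`. Note that the generated YAML elides template arguments: an entry like `type: const ::std::optional &` paired with `dynamic_type: at::Tensor` denotes `const ::std::optional<at::Tensor> &`. For downstream C++ such as lantern the rename is source-compatible while the alias exists; a hedged before/after sketch:

#include <ATen/ATen.h>
#include <optional>

// Previously the generated headers spelled this parameter
// const c10::optional<at::Tensor>&; that still compiles while
// c10::optional aliases std::optional. The new spelling:
at::Tensor scale_if_given(const at::Tensor& x,
                          const ::std::optional<at::Tensor>& w) {
  return w.has_value() ? x * *w : x;
}

int main() {
  at::Tensor x = at::ones({3});
  scale_if_given(x, ::std::nullopt);       // omit the optional
  scale_if_given(x, at::full({3}, 2.0));   // Tensor converts implicitly
  return 0;
}
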
- annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -47251,7 +51141,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, double, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47262,12 +51152,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -47332,12 +51222,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -47353,7 +51243,7 @@ is_nullable: false name: count type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, double, double, int64_t) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, double, double, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47374,12 +51264,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -47440,12 +51330,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -47461,7 +51351,7 @@ is_nullable: false name: counts type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, double, double, const at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, double, double, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47482,12 +51372,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -47543,27 +51433,27 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & 
+ type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_invstd - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -47579,7 +51469,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, ::std::array) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, ::std::array) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47595,27 +51485,27 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_invstd - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -47684,7 +51574,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -47700,7 +51590,7 @@ is_nullable: false name: bias_g type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, bool, bool, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, bool, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47726,7 +51616,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -47772,7 +51662,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor + schema_string: aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? 
weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -47798,23 +51688,23 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy + name: sum_dy type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy_xmu + name: sum_dy_xmu type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false name: count type: const at::Tensor & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47840,16 +51730,16 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy + name: sum_dy type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy_xmu + name: sum_dy_xmu type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -47888,18 +51778,18 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false name: momentum type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -47910,12 +51800,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -47995,7 +51885,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1) -> Tensor + schema_string: aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? 
bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -48011,7 +51901,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -48025,7 +51915,7 @@ name: stride size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -48041,7 +51931,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -48088,7 +51978,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -48096,7 +51986,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -48108,35 +51998,35 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -48172,7 +52062,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -48180,33 +52070,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt 
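
Factory declarations such as `empty.names` above carry the expanded `TensorOptions` pack (dtype, layout, device, pin_memory), each defaulting to `::std::nullopt` after the rename. The C++ API generated from these entries keeps two call styles; a short sketch, assuming the usual ATen overload set:

#include <ATen/ATen.h>
#include <optional>

int main() {
  // Convenience overload: a single TensorOptions argument.
  at::Tensor a = at::empty({2, 3}, at::TensorOptions().dtype(at::kFloat));

  // Schema-order overload matching the expanded signature in this file:
  // (size, dtype?, layout?, device?, pin_memory?, memory_format?)
  at::Tensor b = at::empty({2, 3}, ::std::nullopt, ::std::nullopt,
                           ::std::nullopt, ::std::nullopt, ::std::nullopt);
  return a.numel() == b.numel() ? 0 : 1;
}
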
dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -48292,13 +52182,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -48306,40 +52196,40 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -48461,12 +52351,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: compute_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -48485,11 +52375,11 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: compute_mode - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -48577,8 +52467,8 @@ dynamic_type: int64_t is_nullable: true name: compute_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -48599,7 +52489,7 @@ dynamic_type: int64_t is_nullable: true name: compute_mode - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -49460,7 +53350,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::channel_shuffle(Tensor self, int groups) -> Tensor + schema_string: aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -49505,7 +53395,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: 
aten::native_channel_shuffle(Tensor self, int groups) -> Tensor + schema_string: aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -49558,12 +53448,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional - schema_order_cpp_signature: bool (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: bool (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -49571,11 +53461,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -49605,12 +53495,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -49618,11 +53508,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -49652,12 +53542,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -49665,11 +53555,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50080,7 +53970,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -50088,33 +53978,33 @@ name: s type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: 
::std::optional method_of: - Type - namespace @@ -50148,7 +54038,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -50156,7 +54046,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -50168,35 +54058,35 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50230,13 +54120,13 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -50244,7 +54134,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -50256,41 +54146,41 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50326,7 +54216,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, 
c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -50334,33 +54224,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50394,7 +54284,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -50402,7 +54292,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -50414,35 +54304,35 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50532,8 +54422,8 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -50545,7 +54435,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
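
The `rand`/`randint` family in this stretch now types the optional generator as `::std::optional<at::Generator>` as well, and the `randint` schema strings below switch `int high` (and `int low`) to `SymInt` so sizes can stay symbolic under the compiler stack, while the ordinary C++ entry points keep `int64_t`. A sketch of the generator-taking overload, assuming the standard ATen helpers:

#include <ATen/ATen.h>
#include <ATen/CPUGeneratorImpl.h>
#include <optional>

int main() {
  at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/42);
  at::Tensor a = at::rand({2, 2}, gen);             // explicit generator
  at::Tensor b = at::rand({2, 2}, ::std::nullopt);  // default generator
  return a.sizes() == b.sizes() ? 0 : 1;
}
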
dynamic_type: at::Tensor @@ -50589,13 +54479,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -50603,40 +54493,40 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50658,7 +54548,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -50677,7 +54567,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -50695,28 +54585,28 @@ is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50738,7 +54628,7 @@ overload_name: generator manual_kernel_registration: false category_override: '' - schema_string: aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + schema_string: aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -50755,7 +54645,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: at::kLong dynamic_type: at::TensorOptions @@ -50763,7 +54653,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -50780,35 +54670,35 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: at::kLong dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50830,7 +54720,7 @@ overload_name: low manual_kernel_registration: false category_override: '' - schema_string: aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -50854,7 +54744,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -50877,28 +54767,28 @@ is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -50920,7 +54810,7 @@ overload_name: low_generator manual_kernel_registration: false category_override: '' - schema_string: aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -50942,7 +54832,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: at::kLong dynamic_type: at::TensorOptions @@ -50950,7 +54840,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -50972,35 +54862,35 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: at::kLong dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -51022,7 +54912,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
+ schema_string: aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -51081,7 +54971,7 @@ overload_name: generator_out manual_kernel_registration: false category_override: '' - schema_string: aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -51105,8 +54995,8 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (int64_t, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (int64_t, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -51123,7 +55013,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -51152,7 +55042,7 @@ overload_name: low_out manual_kernel_registration: false category_override: '' - schema_string: aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -51221,7 +55111,7 @@ overload_name: low_generator_out manual_kernel_registration: false category_override: '' - schema_string: aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -51250,8 +55140,8 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (int64_t, int64_t, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (int64_t, int64_t, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -51273,7 +55163,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -51302,7 +55192,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + schema_string: aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -51322,13 +55212,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -51341,40 +55231,40 @@ name: high type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -51396,7 +55286,7 @@ overload_name: low_dtype manual_kernel_registration: false category_override: '' - schema_string: aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + schema_string: aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -51421,13 +55311,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -51445,40 +55335,40 @@ name: high type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -51514,7 +55404,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -51522,33 +55412,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -51582,7 +55472,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -51590,7 +55480,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, 
::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -51602,35 +55492,35 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -51664,7 +55554,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -51672,7 +55562,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -51684,35 +55574,35 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -51746,13 +55636,13 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -51760,7 +55650,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -51772,41 +55662,41 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional + type: 
::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -51896,8 +55786,8 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -51909,7 +55799,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -51953,13 +55843,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -51967,40 +55857,40 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -52022,7 +55912,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -52036,7 +55926,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -52049,28 +55939,28 @@ is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -52092,7 +55982,7 @@ overload_name: generator manual_kernel_registration: false category_override: '' - schema_string: aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -52104,7 +55994,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: at::kLong dynamic_type: at::TensorOptions @@ -52112,7 +56002,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -52124,35 +56014,35 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: at::kLong dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -52174,7 +56064,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -52223,7 +56113,7 @@ overload_name: generator_out manual_kernel_registration: false category_override: '' - schema_string: aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -52242,8 +56132,8 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -52255,7 +56145,7 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -52309,7 +56199,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -52328,33 +56218,33 @@ name: step type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -52395,7 +56285,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: const at::Scalar & @@ -52408,33 +56298,33 @@ name: end type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -53030,7 +56920,7 @@ 
overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor + schema_string: aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -53038,13 +56928,13 @@ name: repeats type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -53052,12 +56942,12 @@ name: repeats type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -53079,7 +56969,7 @@ overload_name: self_Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor + schema_string: aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -53092,19 +56982,19 @@ name: repeats type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -53117,18 +57007,18 @@ name: repeats type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -53151,7 +57041,7 @@ overload_name: self_int manual_kernel_registration: false category_override: '' - schema_string: aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor + schema_string: aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? 
output_size=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -53164,19 +57054,19 @@ name: repeats type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -53189,18 +57079,18 @@ name: repeats type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -53764,12 +57654,12 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -53795,11 +57685,11 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -53847,12 +57737,12 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &, bool, ::std::optional) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -53878,11 +57768,11 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -55810,12 +59700,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -55823,11 +59713,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -55858,12 +59748,12 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -55871,11 +59761,11 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -55913,12 +59803,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -55926,11 +59816,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -56480,227 +60370,132 @@ with_gil: false deprecated: false has_math_kernel: true -- name: slice - operator_name: slice - overload_name: Tensor +- name: sym_size + operator_name: sym_size + overload_name: int manual_kernel_registration: false category_override: '' - schema_string: aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1) -> Tensor(a) + schema_string: aten::sym_size.int(Tensor self, int dim) -> SymInt arguments: - - annotation: a + - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - annotation: null - default: 0 dynamic_type: int64_t is_nullable: false name: dim type: int64_t - - annotation: null - default: c10::nullopt - dynamic_type: int64_t - is_nullable: true - name: start - type: c10::optional - - annotation: null - default: c10::nullopt - dynamic_type: int64_t - is_nullable: true - name: end - type: c10::optional - - annotation: null - default: 1 - dynamic_type: int64_t - is_nullable: false - name: step - type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, int64_t) + schema_order_cpp_signature: c10::SymInt (const at::Tensor &, int64_t) schema_order_arguments: - - annotation: a + - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - annotation: null - default: 0 dynamic_type: int64_t is_nullable: false name: dim type: int64_t - - annotation: null - default: c10::nullopt - dynamic_type: int64_t - is_nullable: true - name: start - type: c10::optional - - annotation: null - default: c10::nullopt - dynamic_type: int64_t - is_nullable: true - name: end - type: c10::optional - - annotation: null - default: 1 - dynamic_type: int64_t - is_nullable: false - name: step - type: int64_t method_of: - Type - - Tensor - namespace mode: native python_module: '' returns: - - dynamic_type: at::Tensor + - dynamic_type: int64_t name: result - type: at::Tensor + type: c10::SymInt inplace: false is_factory_method: false - abstract: true + abstract: false device_guard: false with_gil: false deprecated: false - has_math_kernel: false -- name: slice_backward - operator_name: slice_backward + has_math_kernel: true +- name: sym_numel + operator_name: sym_numel overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor + schema_string: aten::sym_numel(Tensor self) -> SymInt arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: grad_output + name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: input_sizes - type: at::IntArrayRef - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: dim - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: start - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: end - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: step - type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, int64_t, int64_t, int64_t) + schema_order_cpp_signature: c10::SymInt (const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: grad_output + name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: input_sizes - type: at::IntArrayRef - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: dim - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: start - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: end - type: 
int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: step - type: int64_t method_of: - Type - namespace mode: native python_module: '' returns: - - dynamic_type: at::Tensor + - dynamic_type: int64_t name: result - type: at::Tensor + type: c10::SymInt inplace: false is_factory_method: false - abstract: true + abstract: false device_guard: false with_gil: false deprecated: false - has_math_kernel: false -- name: slice_scatter - operator_name: slice_scatter + has_math_kernel: true +- name: sym_storage_offset + operator_name: sym_storage_offset overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor + schema_string: aten::sym_storage_offset(Tensor self) -> SymInt arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: src - type: const at::Tensor & - - annotation: null - default: 0 - dynamic_type: int64_t - is_nullable: false - name: dim - type: int64_t - - annotation: null - default: c10::nullopt - dynamic_type: int64_t - is_nullable: true - name: start - type: c10::optional - - annotation: null - default: c10::nullopt - dynamic_type: int64_t - is_nullable: true - name: end - type: c10::optional - - annotation: null - default: 1 - dynamic_type: int64_t - is_nullable: false - name: step - type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, c10::optional, c10::optional, int64_t) + schema_order_cpp_signature: c10::SymInt (const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - annotation: null + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: c10::SymInt + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: slice + operator_name: slice + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1) -> Tensor(a) + arguments: + - annotation: a dynamic_type: at::Tensor is_nullable: false - name: src + name: self type: const at::Tensor & - annotation: null default: 0 @@ -56709,17 +60504,321 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: start - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: end - type: c10::optional + type: ::std::optional + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: start + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: end + type: ::std::optional + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: slice_backward + operator_name: slice_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: end + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: end + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + 
abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: slice_inverse + operator_name: slice_inverse + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::slice_inverse(Tensor(a) self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: start + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: end + type: ::std::optional + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, ::std::optional, ::std::optional, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: start + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: end + type: ::std::optional + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: slice_scatter + operator_name: slice_scatter + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: start + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: end + type: ::std::optional + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, ::std::optional, ::std::optional, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: start + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: end + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -56919,12 +61018,12 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -56947,11 +61046,11 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -57033,12 +61132,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -57051,11 +61150,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -57098,12 +61197,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null 
dynamic_type: at::Tensor @@ -57116,11 +61215,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -57162,13 +61261,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -57181,12 +61280,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -58563,6 +62662,130 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _chunk_cat + operator_name: _chunk_cat + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_chunk_cat(Tensor[] tensors, int dim, int num_chunks) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_chunks + type: int64_t + schema_order_cpp_signature: at::Tensor (at::TensorList, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_chunks + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _chunk_cat_out + operator_name: _chunk_cat + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_chunks + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::TensorList, int64_t, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_chunks + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: stack operator_name: stack overload_name: '' @@ -59049,23 +63272,23 @@ name: n_fft type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: hop_length - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: win_length - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: window - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -59073,18 +63296,18 @@ name: normalized type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: onesided - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: return_complex - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, const c10::optional &, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional, const ::std::optional &, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -59097,23 +63320,23 @@ name: n_fft type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: hop_length - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: win_length - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: window - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -59121,17 +63344,17 @@ name: normalized type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: onesided - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: return_complex - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -59167,23 +63390,23 @@ name: n_fft type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: hop_length - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: win_length - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: window - type: const c10::optional & + type: const ::std::optional & - annotation: null default: true dynamic_type: bool @@ -59203,18 +63426,18 @@ name: normalized type: bool - annotation: null - default: c10::nullopt + 
default: ::std::nullopt dynamic_type: bool is_nullable: true name: onesided - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: return_complex - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, const c10::optional &, bool, c10::string_view, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional, const ::std::optional &, bool, c10::string_view, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -59227,23 +63450,23 @@ name: n_fft type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: hop_length - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: win_length - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: window - type: const c10::optional & + type: const ::std::optional & - annotation: null default: true dynamic_type: bool @@ -59263,17 +63486,17 @@ name: normalized type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: onesided - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: return_complex - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -59309,23 +63532,23 @@ name: n_fft type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: hop_length - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: win_length - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: window - type: const c10::optional & + type: const ::std::optional & - annotation: null default: true dynamic_type: bool @@ -59339,24 +63562,24 @@ name: normalized type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: onesided - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: length - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false name: return_complex type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, const c10::optional &, bool, bool, c10::optional, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional, const ::std::optional &, bool, bool, ::std::optional, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -59369,23 +63592,23 @@ name: n_fft type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: hop_length - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: 
::std::nullopt dynamic_type: int64_t is_nullable: true name: win_length - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: window - type: const c10::optional & + type: const ::std::optional & - annotation: null default: true dynamic_type: bool @@ -59399,17 +63622,17 @@ name: normalized type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: onesided - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: length - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -59524,12 +63747,12 @@ with_gil: false deprecated: false has_math_kernel: true -- name: sum - operator_name: sum - overload_name: '' +- name: sym_stride + operator_name: sym_stride + overload_name: int manual_kernel_registration: false category_override: '' - schema_string: aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor + schema_string: aten::sym_stride.int(Tensor self, int dim) -> SymInt arguments: - annotation: null dynamic_type: at::Tensor @@ -59537,75 +63760,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional - method_of: - - Type - - Tensor - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: result - type: at::Tensor - inplace: false - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: sum - operator_name: sum - overload_name: dim_IntList - manual_kernel_registration: false - category_override: '' - schema_string: aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor - arguments: - - annotation: null - dynamic_type: at::Tensor + dynamic_type: int64_t is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: true name: dim - size: 1 - type: at::OptionalIntArrayRef - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: keepdim - type: bool - - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional) + type: int64_t + schema_order_cpp_signature: c10::SymInt (const at::Tensor &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -59613,47 +63772,32 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: true - name: dim - size: 1 - type: at::OptionalIntArrayRef - - annotation: null - default: false - dynamic_type: bool + dynamic_type: int64_t is_nullable: false - name: keepdim - type: bool - - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional + name: dim + type: int64_t method_of: - Type - - Tensor - namespace mode: native python_module: '' returns: - - dynamic_type: at::Tensor + - dynamic_type: int64_t name: result - type: at::Tensor + type: c10::SymInt inplace: false is_factory_method: false - abstract: true - device_guard: true + abstract: false + device_guard: false with_gil: false deprecated: false - has_math_kernel: false + has_math_kernel: true - name: sum operator_name: sum - overload_name: dim_DimnameList + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + schema_string: aten::sum(Tensor self, *, ScalarType? 
dtype=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -59661,25 +63805,13 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: at::DimnameList - is_nullable: false - name: dim - size: 1 - type: at::DimnameList - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: keepdim - type: bool - - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -59687,24 +63819,12 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: at::DimnameList - is_nullable: false - name: dim - size: 1 - type: at::DimnameList - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: keepdim - type: bool - - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -59717,25 +63837,18 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true -- name: sum_out + has_math_kernel: false +- name: sum operator_name: sum - overload_name: IntList_out + overload_name: dim_IntList manual_kernel_registration: false category_override: '' - schema_string: aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -59754,13 +63867,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -59780,28 +63893,22 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - - allocate: true - annotation: a! 
- dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & + type: ::std::optional method_of: - Type + - Tensor - namespace mode: native python_module: '' returns: - dynamic_type: at::Tensor - name: out - type: at::Tensor & + name: result + type: at::Tensor inplace: false is_factory_method: false abstract: true @@ -59809,20 +63916,13 @@ with_gil: false deprecated: false has_math_kernel: false -- name: sum_out +- name: sum operator_name: sum - overload_name: DimnameList_out + overload_name: dim_DimnameList manual_kernel_registration: false category_override: '' - schema_string: aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -59841,13 +63941,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -59867,12 +63967,180 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: sum_out + operator_name: sum + overload_name: IntList_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: at::OptionalIntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: at::OptionalIntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sum_out + operator_name: sum + overload_name: DimnameList_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, bool, ::std::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -59978,7 +64246,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -59991,13 +64259,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -60005,7 +64273,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -60018,12 +64286,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -60061,7 +64329,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -60074,13 +64342,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -60088,7 +64356,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -60101,12 +64369,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -60135,7 +64403,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::sum_to_size(Tensor self, int[] size) -> Tensor + schema_string: aten::sum_to_size(Tensor self, SymInt[] size) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -60542,7 +64810,7 @@ overload_name: correction manual_kernel_registration: false category_override: '' - schema_string: aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor + schema_string: aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? 
correction=None, bool keepdim=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -60550,19 +64818,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -60570,7 +64838,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -60578,19 +64846,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -60744,7 +65012,7 @@ overload_name: correction manual_kernel_registration: false category_override: '' - schema_string: aten::std_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor) + schema_string: aten::std_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -60752,19 +65020,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -60772,7 +65040,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -60780,19 +65048,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -60898,7 +65166,7 @@ overload_name: correction_names manual_kernel_registration: false category_override: '' - schema_string: aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? 
correction=None, bool keepdim=False) -> (Tensor, Tensor) + schema_string: aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -60912,12 +65180,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -60925,7 +65193,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::DimnameList, c10::optional, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::DimnameList, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -60939,12 +65207,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -61061,7 +65329,7 @@ overload_name: correction_out manual_kernel_registration: false category_override: '' - schema_string: aten::std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -61076,19 +65344,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -61096,7 +65364,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61104,19 +65372,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -61309,7 +65577,7 @@ overload_name: correction_names manual_kernel_registration: false category_override: '' - schema_string: aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor + schema_string: aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? 
correction=None, bool keepdim=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -61323,12 +65591,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -61336,7 +65604,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61350,12 +65618,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -61385,7 +65653,7 @@ overload_name: correction_names_out manual_kernel_registration: false category_override: '' - schema_string: aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -61406,12 +65674,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -61419,7 +65687,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61433,12 +65701,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -61482,13 +65750,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61496,12 +65764,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -61543,13 +65811,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: 
at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61568,12 +65836,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -61622,13 +65890,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61647,12 +65915,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -61700,13 +65968,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61725,12 +65993,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -61779,13 +66047,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -61804,12 +66072,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -62285,11 +66553,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: true + abstract: false device_guard: true with_gil: false deprecated: false - has_math_kernel: false + has_math_kernel: true - name: threshold operator_name: threshold overload_name: '' @@ -62598,7 +66866,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::tile(Tensor self, int[] dims) -> Tensor + schema_string: aten::tile(Tensor self, SymInt[] dims) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -63086,7 +67354,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor + schema_string: aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -63737,19 +68005,19 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _nested_tensor_offsets - operator_name: _nested_tensor_offsets +- name: _nested_tensor_storage_offsets + operator_name: _nested_tensor_storage_offsets overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_nested_tensor_offsets(Tensor self) -> int[] + schema_string: aten::_nested_tensor_storage_offsets(Tensor self) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - schema_order_cpp_signature: ::std::vector (const at::Tensor &) + schema_order_cpp_signature: at::Tensor (const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -63762,9 +68030,9 @@ mode: native python_module: '' returns: - - dynamic_type: at::IntArrayRef + - dynamic_type: at::Tensor name: result - type: ::std::vector + type: at::Tensor inplace: false is_factory_method: false abstract: true @@ -63822,7 +68090,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor(a) + schema_string: aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor(a) arguments: - annotation: a dynamic_type: at::Tensor @@ -63840,11 +68108,11 @@ name: nested_strides type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false name: offsets - type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef) + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -63862,10 +68130,10 @@ name: nested_strides type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false name: offsets - type: at::IntArrayRef + type: const at::Tensor & method_of: - Type - namespace @@ -63887,7 +68155,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor + schema_string: aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor arguments: - annotation: null dynamic_type: 
at::Tensor @@ -63905,11 +68173,11 @@ name: nested_strides type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false name: offsets - type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef) + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -63927,10 +68195,10 @@ name: nested_strides type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false name: offsets - type: at::IntArrayRef + type: const at::Tensor & method_of: - Type - namespace @@ -63947,55 +68215,579 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _trilinear - operator_name: _trilinear +- name: _nested_view_from_jagged + operator_name: _nested_view_from_jagged overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor + schema_string: aten::_nested_view_from_jagged(Tensor(a) self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None) -> Tensor(a) arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: i1 + name: offsets type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: i2 + name: dummy type: const at::Tensor & - annotation: null + default: '{}' dynamic_type: at::Tensor + is_nullable: true + name: lengths + type: const ::std::optional & + - annotation: null + default: 1 + dynamic_type: int64_t is_nullable: false - name: i3 + name: ragged_idx + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min_seqlen + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max_seqlen + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false - name: expand1 - type: at::IntArrayRef + name: offsets + type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false - name: expand2 - type: at::IntArrayRef + name: dummy + type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: lengths + type: const ::std::optional & + - annotation: null + default: 1 + dynamic_type: int64_t is_nullable: false - name: expand3 - type: at::IntArrayRef + name: ragged_idx + type: int64_t - annotation: null - dynamic_type: at::IntArrayRef + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min_seqlen + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true 
+ name: max_seqlen + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_view_from_jagged_copy + operator_name: _nested_view_from_jagged_copy + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_view_from_jagged_copy(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor is_nullable: false - name: sumdim - type: at::IntArrayRef + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dummy + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: lengths + type: const ::std::optional & - annotation: null default: 1 dynamic_type: int64_t is_nullable: false - name: unroll_dim + name: ragged_idx type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min_seqlen + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max_seqlen + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dummy + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: lengths + type: const ::std::optional & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: ragged_idx + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min_seqlen + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max_seqlen + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_values + operator_name: _nested_get_values + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_values(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + 
schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_values_copy + operator_name: _nested_get_values_copy + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_values_copy(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_offsets + operator_name: _nested_get_offsets + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_offsets(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_lengths + operator_name: _nested_get_lengths + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_lengths(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_ragged_idx + operator_name: _nested_get_ragged_idx + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_ragged_idx(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: int64_t (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + 
type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_min_seqlen + operator_name: _nested_get_min_seqlen + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_min_seqlen(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_max_seqlen + operator_name: _nested_get_max_seqlen + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_max_seqlen(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_jagged_dummy + operator_name: _nested_get_jagged_dummy + overload_name: '' + manual_kernel_registration: false + category_override: dummy + schema_string: aten::_nested_get_jagged_dummy(Tensor any) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: any + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: any + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_compute_contiguous_strides_offsets + operator_name: _nested_compute_contiguous_strides_offsets + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_compute_contiguous_strides_offsets(Tensor nested_size) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: nested_size + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: nested_size + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: 
true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _trilinear + operator_name: _trilinear + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i3 + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sumdim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: unroll_dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -64675,12 +69467,12 @@ name: return_counts type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -64700,11 +69492,11 @@ name: return_counts type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -65028,18 +69820,18 @@ name: x type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: N - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false name: increasing type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -65047,11 +69839,11 @@ name: x type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: N - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -65199,7 +69991,7 @@ overload_name: correction manual_kernel_registration: false category_override: '' - schema_string: aten::var.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor + schema_string: aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? 
correction=None, bool keepdim=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -65207,19 +69999,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65227,7 +70019,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -65235,19 +70027,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65362,7 +70154,7 @@ overload_name: correction_out manual_kernel_registration: false category_override: '' - schema_string: aten::var.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -65377,19 +70169,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65397,7 +70189,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -65405,19 +70197,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65610,7 +70402,7 @@ overload_name: correction_names manual_kernel_registration: false category_override: '' - schema_string: aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? 
correction=None, bool keepdim=False) -> Tensor + schema_string: aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -65624,12 +70416,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65637,7 +70429,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -65651,12 +70443,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65686,7 +70478,7 @@ overload_name: correction_names_out manual_kernel_registration: false category_override: '' - schema_string: aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -65707,12 +70499,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65720,7 +70512,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -65734,12 +70526,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65899,7 +70691,7 @@ overload_name: correction manual_kernel_registration: false category_override: '' - schema_string: aten::var_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor) + schema_string: aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? 
correction=None, bool keepdim=False) -> (Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -65907,19 +70699,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -65927,7 +70719,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -65935,19 +70727,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -66053,7 +70845,7 @@ overload_name: correction_names manual_kernel_registration: false category_override: '' - schema_string: aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor) + schema_string: aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? 
correction=None, bool keepdim=False) -> (Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -66067,12 +70859,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -66080,7 +70872,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::DimnameList, c10::optional, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::DimnameList, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -66094,12 +70886,12 @@ size: 1 type: at::DimnameList - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -66846,7 +71638,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -66854,7 +71646,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -66866,35 +71658,35 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -66916,7 +71708,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -66930,7 +71722,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -66938,33 +71730,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67000,7 +71792,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -67008,33 +71800,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67120,13 +71912,13 @@ name: options type: at::TensorOptions - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67134,40 +71926,40 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true 
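Note on the var/var_mean hunks above: the `correction` argument widens from `int?` to `Scalar?` (`const ::std::optional<at::Scalar> &` in the generated C++ API), so non-integer Bessel-style corrections become expressible. A minimal call-site sketch against the signatures shown in these hunks; it is illustrative only, and the variable names are ours:

    #include <ATen/ATen.h>
    #include <vector>

    int main() {
      at::Tensor x = at::randn({4, 5});
      std::vector<int64_t> dims = {1};
      // Previously correction was ::std::optional<int64_t>; per the widened
      // schema it is const ::std::optional<at::Scalar> &, so a fractional
      // correction such as 0.5 now type-checks.
      at::Tensor v = at::var(x, dims, /*correction=*/at::Scalar(0.5),
                             /*keepdim=*/false);
      // var_mean.correction carries the same change.
      auto [var, mean] = at::var_mean(x, dims, at::Scalar(1), /*keepdim=*/true);
      return 0;
    }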
kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67242,12 +72034,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67255,11 +72047,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67344,12 +72136,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67357,11 +72149,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67391,12 +72183,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67404,11 +72196,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67443,12 +72235,12 @@ name: prob type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67461,11 +72253,11 @@ name: prob type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: 
true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67545,7 +72337,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -67561,8 +72353,8 @@ dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, at::IntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, at::IntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67573,7 +72365,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -67589,7 +72381,7 @@ dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67606,6 +72398,515 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _batch_norm_with_update + operator_name: _batch_norm_with_update + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_batch_norm_with_update(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &, at::Tensor &, double, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: at::Tensor & + - annotation: b! 
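The pervasive `c10::nullopt`/`c10::optional` to `::std::nullopt`/`::std::optional` rewrites in these hunks track upstream PyTorch's switch to the standard optional type; in the LibTorch this diff targets, `c10::optional` is an alias of `std::optional`, so older call sites should keep compiling. A sketch of what a regenerated generator-taking signature looks like at a call site, assuming headers from a recent LibTorch:

    #include <ATen/ATen.h>
    #include <optional>

    // The generated signature is now spelled, e.g.:
    //   at::Tensor at::bernoulli(const at::Tensor &,
    //                            ::std::optional<at::Generator> generator);
    at::Tensor sample(const at::Tensor& probs,
                      std::optional<at::Generator> gen = std::nullopt) {
      // std::nullopt (or omitting the argument) selects the default
      // generator, exactly as c10::nullopt did before the rename.
      return at::bernoulli(probs, gen);
    }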
+ dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _batch_norm_with_update_out + operator_name: _batch_norm_with_update + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_batch_norm_with_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd, Tensor(g!) reserve) -> (Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!)) + arguments: + - allocate: true + annotation: d! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - allocate: true + annotation: e! + dynamic_type: at::Tensor + is_nullable: false + name: save_mean + output: true + type: at::Tensor & + - allocate: true + annotation: f! + dynamic_type: at::Tensor + is_nullable: false + name: save_invstd + output: true + type: at::Tensor & + - allocate: true + annotation: g! + dynamic_type: at::Tensor + is_nullable: false + name: reserve + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &, at::Tensor &, double, double, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - allocate: true + annotation: d! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - allocate: true + annotation: e! + dynamic_type: at::Tensor + is_nullable: false + name: save_mean + output: true + type: at::Tensor & + - allocate: true + annotation: f! + dynamic_type: at::Tensor + is_nullable: false + name: save_invstd + output: true + type: at::Tensor & + - allocate: true + annotation: g! + dynamic_type: at::Tensor + is_nullable: false + name: reserve + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + - dynamic_type: at::Tensor + name: save_mean + type: at::Tensor & + - dynamic_type: at::Tensor + name: save_invstd + type: at::Tensor & + - dynamic_type: at::Tensor + name: reserve + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _batch_norm_no_update + operator_name: _batch_norm_no_update + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_batch_norm_no_update(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const ::std::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, double, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const ::std::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: 
false + has_math_kernel: false +- name: batch_norm_backward + operator_name: batch_norm_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::batch_norm_backward(Tensor grad_out, Tensor input, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, bool update, float eps, bool[3] output_mask, Tensor reserve) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_var + type: const ::std::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: update + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: reserve + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, ::std::array, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_var + type: const ::std::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: update + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: reserve + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false 
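`_batch_norm_with_update`, `_batch_norm_no_update`, and `batch_norm_backward` above are newly added fused batch-norm entry points. Per the schemas, the training-mode forward mutates `running_mean`/`running_var` in place (the `a!`/`b!` annotations) and returns `(output, save_mean, save_invstd, reserve)`, with `reserve` fed back into the backward op. A sketch of the pairing, assuming a LibTorch build that ships these internal ops:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor input = at::randn({8, 3, 16, 16});
      at::Tensor weight = at::ones({3}), bias = at::zeros({3});
      at::Tensor running_mean = at::zeros({3}), running_var = at::ones({3});

      // Training-mode forward: updates the running stats in place and
      // returns the reserve tensor that the backward schema consumes.
      auto [out, save_mean, save_invstd, reserve] =
          at::_batch_norm_with_update(input, weight, bias,
                                      running_mean, running_var,
                                      /*momentum=*/0.1, /*eps=*/1e-5);

      at::Tensor grad_out = at::ones_like(out);
      auto [grad_input, grad_weight, grad_bias] = at::batch_norm_backward(
          grad_out, input, weight, running_mean, running_var,
          save_mean, save_invstd, /*update=*/true, /*eps=*/1e-5,
          /*output_mask=*/{true, true, true}, reserve);
      return 0;
    }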
+ has_math_kernel: false - name: _sparse_sum operator_name: _sparse_sum overload_name: '' @@ -67874,13 +73175,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67900,12 +73201,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -67947,13 +73248,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -67973,12 +73274,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -68013,12 +73314,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68031,11 +73332,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -68070,13 +73371,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68089,12 +73390,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -68249,12 +73550,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) 
schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68267,11 +73568,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -68306,13 +73607,13 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68325,12 +73626,12 @@ name: dim type: at::Dimname - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -68490,12 +73791,12 @@ name: shape type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true name: layout - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68513,11 +73814,11 @@ name: shape type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true name: layout - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -68550,14 +73851,14 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::ScalarType is_nullable: false kwarg_only: true name: dtype type: at::ScalarType - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, at::ScalarType) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, at::ScalarType) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68568,7 +73869,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::ScalarType is_nullable: false @@ -68656,7 +73957,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68674,7 +73975,7 @@ kwarg_only: true name: dtype type: at::ScalarType - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, at::IntArrayRef, bool, at::ScalarType) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, at::IntArrayRef, bool, at::ScalarType) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68685,7 +73986,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68736,7 +74037,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const 
c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68749,7 +74050,7 @@ is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, at::IntArrayRef, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, at::IntArrayRef, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68760,7 +74061,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68813,7 +74114,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68831,7 +74132,7 @@ kwarg_only: true name: dtype type: at::ScalarType - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::IntArrayRef, bool, at::ScalarType, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::IntArrayRef, bool, at::ScalarType, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68842,7 +74143,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68906,7 +74207,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68919,7 +74220,7 @@ is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::IntArrayRef, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::IntArrayRef, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -68930,7 +74231,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -68982,7 +74283,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69000,7 +74301,7 @@ kwarg_only: true name: dtype type: at::ScalarType - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, at::DimnameList, bool, at::ScalarType) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, at::DimnameList, bool, at::ScalarType) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -69011,7 +74312,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69062,7 +74363,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69075,7 +74376,7 @@ is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, at::DimnameList, bool) + 
schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, at::DimnameList, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -69086,7 +74387,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69139,7 +74440,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69157,7 +74458,7 @@ kwarg_only: true name: dtype type: at::ScalarType - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::DimnameList, bool, at::ScalarType, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::DimnameList, bool, at::ScalarType, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -69168,7 +74469,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69232,7 +74533,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69245,7 +74546,7 @@ is_nullable: false name: keepdim type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::DimnameList, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::DimnameList, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -69256,7 +74557,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::DimnameList is_nullable: false @@ -69790,13 +75091,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -69804,12 +75105,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -69881,13 +75182,13 @@ name: the_template type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a! 
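The `norm` hunks above only respell the nullable `p` as `const ::std::optional<at::Scalar> &`; behavior is unchanged, and a null `p` still means the default 2-norm. A brief sketch against the p/dim/keepdim overload shown here (names ours):

    #include <ATen/ATen.h>
    #include <optional>

    int main() {
      at::Tensor t = at::randn({3, 4});
      // p = std::nullopt selects the default 2-norm over dim 1.
      at::Tensor n2 = at::norm(t, std::nullopt, /*dim=*/{1}, /*keepdim=*/false);
      // An explicit Scalar p, e.g. the 1-norm.
      at::Tensor n1 = at::norm(t, at::Scalar(1), /*dim=*/{1}, /*keepdim=*/false);
      return 0;
    }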
dynamic_type: at::Tensor @@ -69900,12 +75201,12 @@ name: the_template type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -71767,89 +77068,103 @@ with_gil: false deprecated: false has_math_kernel: false -- name: sparse_compressed_tensor - operator_name: sparse_compressed_tensor - overload_name: comp_plain_value_size +- name: _scaled_mm + operator_name: _scaled_mm + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::_scaled_mm(Tensor self, Tensor mat2, Tensor scale_a, Tensor scale_b, Tensor? bias=None, Tensor? scale_result=None, ScalarType? out_dtype=None, bool use_fast_accum=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: compressed_indices + name: self type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: plain_indices + name: mat2 type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: values + name: scale_a type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false - name: size - type: at::IntArrayRef + name: scale_b + type: const at::Tensor & - annotation: null - dynamic_type: at::TensorOptions + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: scale_result + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: out_dtype + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool is_nullable: false - kwarg_only: true - name: options - type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + name: use_fast_accum + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: compressed_indices + name: self type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: plain_indices + name: mat2 type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: values + name: scale_a type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false - name: size - type: at::IntArrayRef + name: scale_b + type: const at::Tensor & - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType + default: '{}' + dynamic_type: at::Tensor is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional + name: bias + type: const ::std::optional & - annotation: null - default: c10::nullopt - dynamic_type: at::Layout + default: '{}' + dynamic_type: 
at::Tensor is_nullable: true - kwarg_only: true - name: layout - type: c10::optional + name: scale_result + type: const ::std::optional & - annotation: null - default: c10::nullopt - dynamic_type: at::Device + default: ::std::nullopt + dynamic_type: at::ScalarType is_nullable: true - kwarg_only: true - name: device - type: c10::optional + name: out_dtype + type: ::std::optional - annotation: null default: false dynamic_type: bool - is_nullable: true - kwarg_only: true - name: pin_memory - type: c10::optional + is_nullable: false + name: use_fast_accum + type: bool method_of: - Type - namespace @@ -71860,95 +77175,123 @@ name: result type: at::Tensor inplace: false - is_factory_method: true - abstract: false + is_factory_method: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true -- name: sparse_csr_tensor - operator_name: sparse_csr_tensor - overload_name: crow_col_value_size + has_math_kernel: false +- name: _scaled_mm_out + operator_name: _scaled_mm + overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::_scaled_mm.out(Tensor self, Tensor mat2, Tensor scale_a, Tensor scale_b, Tensor? bias=None, Tensor? scale_result=None, ScalarType? out_dtype=None, bool use_fast_accum=False, *, Tensor(a!) out) -> Tensor(a!) arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: crow_indices + name: self type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: col_indices + name: mat2 type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: values + name: scale_a type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false - name: size - type: at::IntArrayRef + name: scale_b + type: const at::Tensor & - annotation: null - dynamic_type: at::TensorOptions + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: scale_result + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: out_dtype + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool is_nullable: false - kwarg_only: true - name: options - type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + name: use_fast_accum + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, ::std::optional, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: crow_indices + name: self type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: col_indices + name: mat2 type: const at::Tensor & - annotation: null dynamic_type: at::Tensor 
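`_scaled_mm` and its `.out` variant, added above, expose the FP8 scaled matrix multiply. In this schema `scale_a`/`scale_b` are required tensor scales, `bias`, `scale_result`, and `out_dtype` are optional, and the op returns a single Tensor. A call-site sketch, assuming a CUDA LibTorch with FP8 support; the op is internal and hardware-gated, and `a`/`b` are assumed to already be FP8 tensors laid out as the kernel expects:

    #include <ATen/ATen.h>
    #include <optional>

    at::Tensor scaled_matmul(const at::Tensor& a, const at::Tensor& b) {
      // Per-tensor scales; required positional arguments in this schema.
      at::Tensor scale_a = at::ones({}, a.options().dtype(at::kFloat));
      at::Tensor scale_b = at::ones({}, a.options().dtype(at::kFloat));
      return at::_scaled_mm(a, b, scale_a, scale_b,
                            /*bias=*/std::nullopt,
                            /*scale_result=*/std::nullopt,
                            /*out_dtype=*/at::kHalf,
                            /*use_fast_accum=*/false);
    }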
is_nullable: false - name: values + name: scale_a type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false - name: size - type: at::IntArrayRef + name: scale_b + type: const at::Tensor & - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType + default: '{}' + dynamic_type: at::Tensor is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional + name: bias + type: const ::std::optional & - annotation: null - default: c10::nullopt - dynamic_type: at::Layout + default: '{}' + dynamic_type: at::Tensor is_nullable: true - kwarg_only: true - name: layout - type: c10::optional + name: scale_result + type: const ::std::optional & - annotation: null - default: c10::nullopt - dynamic_type: at::Device + default: ::std::nullopt + dynamic_type: at::ScalarType is_nullable: true - kwarg_only: true - name: device - type: c10::optional + name: out_dtype + type: ::std::optional - annotation: null default: false dynamic_type: bool - is_nullable: true - kwarg_only: true - name: pin_memory - type: c10::optional + is_nullable: false + name: use_fast_accum + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & method_of: - Type - namespace @@ -71956,98 +77299,108 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: result - type: at::Tensor + name: out + type: at::Tensor & inplace: false - is_factory_method: true - abstract: false + is_factory_method: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true -- name: sparse_csc_tensor - operator_name: sparse_csc_tensor - overload_name: ccol_row_value_size + has_math_kernel: false +- name: _sparse_compressed_tensor_with_dims + operator_name: _sparse_compressed_tensor_with_dims + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::_sparse_compressed_tensor_with_dims(int nnz, int dense_dim, int[] size, int[] blocksize, ScalarType index_dtype, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null - dynamic_type: at::Tensor + dynamic_type: int64_t is_nullable: false - name: ccol_indices - type: const at::Tensor & + name: nnz + type: int64_t - annotation: null - dynamic_type: at::Tensor + dynamic_type: int64_t is_nullable: false - name: row_indices - type: const at::Tensor & + name: dense_dim + type: int64_t - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::IntArrayRef is_nullable: false - name: values - type: const at::Tensor & + name: size + type: at::IntArrayRef - annotation: null dynamic_type: at::IntArrayRef is_nullable: false - name: size + name: blocksize type: at::IntArrayRef + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: index_dtype + type: at::ScalarType - annotation: null dynamic_type: at::TensorOptions is_nullable: false kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, at::IntArrayRef, at::ScalarType, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null - dynamic_type: at::Tensor + dynamic_type: int64_t is_nullable: false - name: ccol_indices - type: const at::Tensor & + name: nnz + type: int64_t - annotation: null - dynamic_type: at::Tensor + dynamic_type: int64_t is_nullable: false - name: row_indices - type: const at::Tensor & + name: dense_dim + type: int64_t - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::IntArrayRef is_nullable: false - name: values - type: const at::Tensor & + name: size + type: at::IntArrayRef - annotation: null dynamic_type: at::IntArrayRef is_nullable: false - name: size + name: blocksize type: at::IntArrayRef - annotation: null - default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: false + name: index_dtype + type: at::ScalarType + - annotation: null + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72059,27 +77412,27 @@ type: at::Tensor inplace: false is_factory_method: true - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true -- name: sparse_bsr_tensor - operator_name: sparse_bsr_tensor - overload_name: crow_col_value_size + has_math_kernel: false +- name: sparse_compressed_tensor + operator_name: sparse_compressed_tensor + overload_name: comp_plain_value_size manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + schema_string: aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: crow_indices + name: compressed_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: col_indices + name: plain_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72097,17 +77450,17 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: crow_indices + name: compressed_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: col_indices + name: plain_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72120,33 +77473,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72158,27 +77511,27 @@ type: at::Tensor inplace: false is_factory_method: true - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true -- name: sparse_bsc_tensor - operator_name: sparse_bsc_tensor - overload_name: ccol_row_value_size + has_math_kernel: false +- name: sparse_csr_tensor + operator_name: sparse_csr_tensor + overload_name: crow_col_value_size manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ccol_indices + name: crow_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: row_indices + name: col_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72196,17 +77549,17 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ccol_indices + name: crow_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: row_indices + name: col_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72219,33 +77572,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72262,45 +77615,50 @@ with_gil: false deprecated: false has_math_kernel: true -- name: sparse_compressed_tensor - operator_name: sparse_compressed_tensor - overload_name: comp_plain_value +- name: sparse_csc_tensor + operator_name: sparse_csc_tensor + overload_name: ccol_row_value_size manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: compressed_indices + name: ccol_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: plain_indices + name: row_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false name: values type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef - annotation: null dynamic_type: at::TensorOptions is_nullable: false kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: compressed_indices + name: ccol_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: plain_indices + name: row_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72308,33 +77666,38 @@ name: values type: const at::Tensor & - annotation: null - default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72351,12 +77714,12 @@ with_gil: false deprecated: false has_math_kernel: true -- name: sparse_csr_tensor - operator_name: sparse_csr_tensor - overload_name: crow_col_value +- name: sparse_bsr_tensor + operator_name: sparse_bsr_tensor + overload_name: crow_col_value_size manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -72373,13 +77736,18 @@ is_nullable: false name: values type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef - annotation: null dynamic_type: at::TensorOptions is_nullable: false kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -72397,33 +77765,38 @@ name: values type: const at::Tensor & - annotation: null - default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72440,12 +77813,12 @@ with_gil: false deprecated: false has_math_kernel: true -- name: sparse_csc_tensor - operator_name: sparse_csc_tensor - overload_name: ccol_row_value +- name: sparse_bsc_tensor + operator_name: sparse_bsc_tensor + overload_name: ccol_row_value_size manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -72462,13 +77835,18 @@ is_nullable: false name: values type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef - annotation: null dynamic_type: at::TensorOptions is_nullable: false kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -72486,33 +77864,38 @@ name: values type: const at::Tensor & - annotation: null - default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72529,12 +77912,101 @@ with_gil: false deprecated: false has_math_kernel: true -- name: sparse_bsr_tensor - operator_name: sparse_bsr_tensor +- name: sparse_compressed_tensor + operator_name: sparse_compressed_tensor + overload_name: comp_plain_value + manual_kernel_registration: false + category_override: '' + schema_string: aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: compressed_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: plain_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: compressed_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: plain_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sparse_csr_tensor + operator_name: sparse_csr_tensor overload_name: crow_col_value manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -72557,7 +78029,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -72575,33 +78047,33 @@ name: values type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72618,12 +78090,12 @@ with_gil: false deprecated: false has_math_kernel: true -- name: sparse_bsc_tensor - operator_name: sparse_bsc_tensor +- name: sparse_csc_tensor + operator_name: sparse_csc_tensor overload_name: ccol_row_value manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -72646,7 +78118,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -72664,33 +78136,33 @@ name: values type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72707,22 +78179,22 @@ with_gil: false deprecated: false has_math_kernel: true -- name: _sparse_compressed_tensor_unsafe - operator_name: _sparse_compressed_tensor_unsafe - overload_name: '' +- name: sparse_bsr_tensor + operator_name: sparse_bsr_tensor + overload_name: crow_col_value manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: compressed_indices + name: crow_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: plain_indices + name: col_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72730,28 +78202,22 @@ name: values type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: size - type: at::IntArrayRef - - annotation: null - default: '{}' dynamic_type: at::TensorOptions is_nullable: false kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: compressed_indices + name: crow_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: plain_indices + name: col_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72759,38 +78225,122 @@ name: values type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: sparse_bsc_tensor + operator_name: sparse_bsc_tensor + overload_name: ccol_row_value + manual_kernel_registration: false + category_override: '' + schema_string: aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor is_nullable: false - name: size - type: at::IntArrayRef + name: ccol_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: row_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: ccol_indices + type: const at::Tensor & - annotation: null - default: c10::nullopt + dynamic_type: at::Tensor + is_nullable: false + name: row_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72807,22 +78357,22 @@ with_gil: false deprecated: false has_math_kernel: true -- name: _sparse_csr_tensor_unsafe - operator_name: _sparse_csr_tensor_unsafe +- name: _sparse_compressed_tensor_unsafe + operator_name: _sparse_compressed_tensor_unsafe overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: crow_indices + name: compressed_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: col_indices + name: plain_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72841,17 +78391,17 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: crow_indices + name: compressed_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: col_indices + name: plain_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72864,33 +78414,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -72907,22 +78457,22 @@ with_gil: false deprecated: false has_math_kernel: true -- name: _sparse_csc_tensor_unsafe - operator_name: _sparse_csc_tensor_unsafe +- name: _sparse_csr_tensor_unsafe + operator_name: _sparse_csr_tensor_unsafe overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ccol_indices + name: crow_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: row_indices + name: col_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72941,17 +78491,17 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ccol_indices + name: crow_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: row_indices + name: col_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -72964,33 +78514,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -73007,22 +78557,22 @@ with_gil: false deprecated: false has_math_kernel: true -- name: _sparse_bsr_tensor_unsafe - operator_name: _sparse_bsr_tensor_unsafe +- name: _sparse_csc_tensor_unsafe + operator_name: _sparse_csc_tensor_unsafe overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: crow_indices + name: ccol_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: col_indices + name: row_indices type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -73041,8 +78591,79 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: ccol_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: row_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _sparse_bsr_tensor_unsafe + operator_name: _sparse_bsr_tensor_unsafe + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -73064,33 +78685,62 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: crow_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: col_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -73141,7 +78791,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -73164,33 +78814,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -73225,7 +78875,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -73233,33 +78883,33 @@ name: 
size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -73281,7 +78931,7 @@ overload_name: indices manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -73300,7 +78950,14 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -73313,33 +78970,40 @@ name: values type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional method_of: - Type - namespace @@ -73361,7 +79025,7 @@ overload_name: indices_size manual_kernel_registration: false category_override: '' - schema_string: aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? 
is_coalesced=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -73385,7 +79049,14 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -73403,33 +79074,40 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional method_of: - Type - namespace @@ -73451,7 +79129,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + schema_string: aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? 
is_coalesced=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -73475,7 +79153,14 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -73493,33 +79178,40 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional method_of: - Type - namespace @@ -73541,7 +79233,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> () + schema_string: aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size, bool? 
is_coalesced=None) -> () arguments: - annotation: null dynamic_type: at::Tensor @@ -73558,7 +79250,13 @@ is_nullable: false name: size type: at::IntArrayRef - schema_order_cpp_signature: void (const at::Tensor &, const at::Tensor &, at::IntArrayRef) + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: is_coalesced + type: ::std::optional + schema_order_cpp_signature: void (const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -73575,6 +79273,12 @@ is_nullable: false name: size type: at::IntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: is_coalesced + type: ::std::optional method_of: - Type - namespace @@ -73936,7 +79640,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -73954,33 +79658,33 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -74002,7 +79706,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + schema_string: aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? 
is_coalesced=None) -> Tensor arguments: - annotation: null dynamic_type: int64_t @@ -74035,7 +79739,14 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -74063,33 +79774,40 @@ name: values type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional method_of: - Type - namespace @@ -74281,6 +79999,63 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _sparse_mask_projection + operator_name: _sparse_mask_projection + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate_matches + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate_matches + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _to_cpu operator_name: _to_cpu overload_name: '' @@ -74321,7 +80096,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::to_dense(Tensor self, ScalarType? dtype=None) -> Tensor + schema_string: aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? 
masked_grad=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -74329,12 +80104,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: masked_grad + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -74342,11 +80124,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: masked_grad + type: ::std::optional method_of: - Type - Tensor @@ -74368,7 +80157,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_to_dense(Tensor self, ScalarType? dtype=None) -> Tensor + schema_string: aten::_to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -74376,12 +80165,18 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: masked_grad + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -74389,11 +80184,17 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: masked_grad + type: ::std::optional method_of: - Type - Tensor @@ -74415,7 +80216,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::to_dense_backward(Tensor grad, Tensor input) -> Tensor + schema_string: aten::to_dense_backward(Tensor grad, Tensor input, bool? 
masked_grad=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -74427,7 +80228,13 @@ is_nullable: false name: input type: const at::Tensor & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: masked_grad + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -74439,6 +80246,12 @@ is_nullable: false name: input type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: masked_grad + type: ::std::optional method_of: - Type - namespace @@ -75355,17 +81168,17 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: true + abstract: false device_guard: true with_gil: false deprecated: false - has_math_kernel: false -- name: to_sparse - operator_name: to_sparse - overload_name: '' + has_math_kernel: true +- name: _to_sparse + operator_name: _to_sparse + overload_name: sparse_dim manual_kernel_registration: false category_override: '' - schema_string: aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor + schema_string: aten::_to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -75373,43 +81186,59 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt - dynamic_type: at::Layout - is_nullable: true - kwarg_only: true - name: layout - type: c10::optional + dynamic_type: int64_t + is_nullable: false + name: sparse_dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: - annotation: null - default: c10::nullopt - dynamic_type: at::IntArrayRef - is_nullable: true - kwarg_only: true - name: blocksize - size: 2 - type: at::OptionalIntArrayRef + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & - annotation: null - default: c10::nullopt dynamic_type: int64_t - is_nullable: true - kwarg_only: true - name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::OptionalIntArrayRef, c10::optional) - schema_order_arguments: + is_nullable: false + name: sparse_dim + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: to_sparse + operator_name: to_sparse + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? 
dense_dim=None) -> Tensor + arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true kwarg_only: true @@ -75417,12 +81246,120 @@ size: 2 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dense_dim - type: c10::optional + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, at::OptionalIntArrayRef, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + kwarg_only: true + name: blocksize + size: 2 + type: at::OptionalIntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: dense_dim + type: ::std::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _to_sparse + operator_name: _to_sparse + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? 
dense_dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + kwarg_only: true + name: blocksize + size: 2 + type: at::OptionalIntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: dense_dim + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, at::OptionalIntArrayRef, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + kwarg_only: true + name: blocksize + size: 2 + type: at::OptionalIntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: dense_dim + type: ::std::optional method_of: - Type - Tensor @@ -75452,12 +81389,59 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _to_sparse_csr + operator_name: _to_sparse_csr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_sparse_csr(Tensor self, int? 
dense_dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -75465,11 +81449,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -75499,12 +81483,59 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _to_sparse_csc + operator_name: _to_sparse_csc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_sparse_csc(Tensor self, int? 
dense_dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -75512,11 +81543,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -75552,12 +81583,12 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -75571,11 +81602,70 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional + type: ::std::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _to_sparse_bsr + operator_name: _to_sparse_bsr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_sparse_bsr(Tensor self, int[2] blocksize, int? 
dense_dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: blocksize + size: 2 + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: blocksize + size: 2 + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional method_of: - Type - Tensor @@ -75611,12 +81701,12 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -75630,11 +81720,11 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -75646,6 +81736,103 @@ type: at::Tensor inplace: false is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _to_sparse_bsc + operator_name: _to_sparse_bsc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_sparse_bsc(Tensor self, int[2] blocksize, int? 
dense_dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: blocksize + size: 2 + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: blocksize + size: 2 + type: at::IntArrayRef + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: dense_dim + type: ::std::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _to_sparse_semi_structured + operator_name: _to_sparse_semi_structured + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false abstract: true device_guard: true with_gil: false @@ -75664,12 +81851,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -75677,11 +81864,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -75703,7 +81890,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None) -> Tensor + schema_string: aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? 
input_size=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -75738,7 +81925,7 @@ name: groups type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: input_size @@ -75778,7 +81965,7 @@ name: groups type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: input_size @@ -75804,7 +81991,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor + schema_string: aten::mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -75838,7 +82025,13 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: input_size + type: at::OptionalIntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::OptionalIntArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -75872,6 +82065,12 @@ is_nullable: false name: groups type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: input_size + type: at::OptionalIntArrayRef method_of: - Type - namespace @@ -78328,13 +84527,13 @@ name: non_blocking type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -78342,33 +84541,33 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -78377,12 +84576,12 @@ name: non_blocking type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -78433,13 +84632,13 @@ name: copy 
type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, bool, bool, ::std::optional) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -78447,33 +84646,33 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -78489,12 +84688,12 @@ name: copy type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -78546,12 +84745,12 @@ name: copy type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Device, at::ScalarType, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Device, at::ScalarType, bool, bool, ::std::optional) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -78581,11 +84780,11 @@ name: copy type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -78632,12 +84831,12 @@ name: copy type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::ScalarType, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::ScalarType, bool, bool, ::std::optional) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -78662,11 +84861,11 @@ name: copy type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -78713,12 +84912,12 @@ name: copy type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const 
at::Tensor &, const at::Tensor &, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, ::std::optional) schema_order_arguments: - annotation: a dynamic_type: at::Tensor @@ -78743,11 +84942,11 @@ name: copy type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -79160,12 +85359,12 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::can_cast(ScalarType from, ScalarType to) -> bool + schema_string: aten::can_cast(ScalarType from_, ScalarType to) -> bool arguments: - annotation: null dynamic_type: at::ScalarType is_nullable: false - name: from + name: from_ type: at::ScalarType - annotation: null dynamic_type: at::ScalarType @@ -79177,7 +85376,7 @@ - annotation: null dynamic_type: at::ScalarType is_nullable: false - name: from + name: from_ type: at::ScalarType - annotation: null dynamic_type: at::ScalarType @@ -79415,23 +85614,23 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[]) + schema_string: aten::lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[]) arguments: - annotation: null dynamic_type: at::Tensor - is_nullable: false + is_nullable: true name: grad_y - type: const at::Tensor & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -79492,23 +85691,23 @@ is_nullable: false name: batch_first type: bool - schema_order_cpp_signature: ::std::tuple,::std::vector> (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool) + schema_order_cpp_signature: ::std::tuple,::std::vector> (const ::std::optional &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor - is_nullable: false + is_nullable: true name: grad_y - type: const at::Tensor & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ 
-79618,14 +85817,14 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -79647,13 +85846,13 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -79687,12 +85886,12 @@ dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -79713,18 +85912,18 @@ is_nullable: false name: has_bias type: bool - schema_order_cpp_signature: ::std::tuple (const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool) + schema_order_cpp_signature: ::std::tuple (const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -79778,12 +85977,12 @@ dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -79804,18 +86003,18 @@ is_nullable: false name: has_bias type: bool - schema_order_cpp_signature: ::std::tuple (const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool) + schema_order_cpp_signature: ::std::tuple (const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -79875,12 +86074,12 @@ dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ 
-79895,12 +86094,12 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -79911,18 +86110,18 @@ is_nullable: false name: cy type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -79937,12 +86136,12 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -80008,14 +86207,14 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -80037,13 +86236,13 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -80161,13 +86360,13 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -80193,12 +86392,12 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & 
+ type: const ::std::optional & method_of: - Type - namespace @@ -81209,14 +87408,14 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -81243,13 +87442,13 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -81301,14 +87500,14 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -81335,13 +87534,13 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -81390,14 +87589,14 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -81424,13 +87623,13 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -81479,14 +87678,14 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + type: 
const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -81513,13 +87712,13 @@ dynamic_type: at::Tensor is_nullable: true name: b_ih - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: b_hh - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -83162,6 +89361,61 @@ with_gil: false deprecated: false has_math_kernel: false +- name: masked_scatter_backward + operator_name: masked_scatter_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sizes + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sizes + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _masked_softmax operator_name: _masked_softmax overload_name: '' @@ -83180,18 +89434,18 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -83204,17 +89458,17 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -83254,12 +89508,12 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional) + type: 
::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -83277,11 +89531,11 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -86648,11 +92902,11 @@ type: at::Tensor & inplace: true is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: bitwise_and_ operator_name: bitwise_and_ overload_name: Tensor @@ -87039,11 +93293,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: bitwise_or operator_name: bitwise_or overload_name: Scalar_Tensor @@ -87175,11 +93429,11 @@ type: at::Tensor & inplace: true is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: bitwise_or_ operator_name: bitwise_or_ overload_name: Tensor @@ -87566,11 +93820,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: bitwise_xor operator_name: bitwise_xor overload_name: Scalar_Tensor @@ -87702,11 +93956,11 @@ type: at::Tensor & inplace: true is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: bitwise_xor_ operator_name: bitwise_xor_ overload_name: Tensor @@ -89512,15 +95766,15 @@ dynamic_type: int64_t is_nullable: true name: to - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, ::std::optional, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -89536,14 +95790,14 @@ dynamic_type: int64_t is_nullable: true name: to - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -89578,13 +95832,13 @@ name: to type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -89597,12 +95851,12 @@ name: to type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -89632,13 +95886,13 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -89646,12 +95900,12 @@ name: self type: at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -89693,13 +95947,13 @@ name: to type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -89719,12 +95973,12 @@ name: to type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -89766,13 +96020,13 @@ name: sigma type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -89792,12 +96046,12 @@ name: sigma type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -89839,13 +96093,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -89865,12 +96119,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -89906,13 +96160,13 @@ name: lambd type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -89926,12 +96180,12 @@ name: lambd type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -89966,13 +96220,13 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -89985,12 +96239,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -90141,12 +96395,12 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -90159,11 +96413,11 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -90205,12 +96459,12 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -90223,11 +96477,11 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -90493,7 +96747,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -90517,28 +96771,28 @@ is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -90585,7 +96839,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -90609,28 +96863,28 @@ is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -94068,12 +100322,12 @@ name: indices type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -94086,11 
+100340,11 @@ name: indices type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -94132,12 +100386,12 @@ name: indices type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -94150,11 +100404,11 @@ name: indices type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -94732,6 +100986,143 @@ with_gil: false deprecated: false has_math_kernel: false +- name: nonzero_static_out + operator_name: nonzero_static + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: size + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: fill_value + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: size + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: fill_value + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nonzero_static + operator_name: nonzero_static + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: size + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: fill_value + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: size + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: fill_value + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: nonzero_numpy operator_name: nonzero_numpy overload_name: '' @@ -95716,7 +102107,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -95735,7 +102126,7 @@ is_nullable: false name: label_smoothing type: double - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, double) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -95752,7 +102143,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -96235,7 +102626,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::linalg_vander(Tensor x, *, int? N=None) -> Tensor + schema_string: aten::linalg_vander(Tensor x, *, SymInt? 
N=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -96243,13 +102634,13 @@ name: x type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: N - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -96257,12 +102648,12 @@ name: x type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: N - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -98044,13 +104435,13 @@ name: replacement type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -98069,12 +104460,12 @@ name: replacement type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -98122,13 +104513,13 @@ name: replacement type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -98147,12 +104538,12 @@ name: replacement type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -99775,7 +106166,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -99783,7 +106174,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, bool, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, bool, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -99801,7 +106192,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -99869,7 +106260,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -99877,7 +106268,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const 
at::Tensor &, const c10::optional &, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -99895,7 +106286,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -99960,19 +106351,19 @@ name: bins type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -99980,7 +106371,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, c10::optional>, const c10::optional &, bool, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, ::std::optional>, const ::std::optional &, bool, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -99994,19 +106385,19 @@ name: bins type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100070,19 +106461,19 @@ name: bins type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100090,7 +106481,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, c10::optional>, const c10::optional &, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, ::std::optional>, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -100104,19 +106495,19 @@ name: bins type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100164,19 +106555,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ 
-100184,7 +106575,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::IntArrayRef, c10::optional>, const c10::optional &, bool) + schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::IntArrayRef, ::std::optional>, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -100197,19 +106588,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100251,19 +106642,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100271,7 +106662,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional>, const c10::optional &, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional>, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -100284,19 +106675,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100343,7 +106734,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100351,7 +106742,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::TensorList, const c10::optional &, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::TensorList, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -100369,7 +106760,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100411,24 +106802,24 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool is_nullable: false name: density type: bool - schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::IntArrayRef, c10::optional>, const c10::optional &, bool) + 
schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::IntArrayRef, ::std::optional>, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -100441,17 +106832,17 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100497,24 +106888,24 @@ name: bins type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool is_nullable: false name: density type: bool - schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, int64_t, c10::optional>, const c10::optional &, bool) + schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, int64_t, ::std::optional>, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -100527,17 +106918,17 @@ name: bins type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -100583,24 +106974,24 @@ name: bins type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool is_nullable: false name: density type: bool - schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::TensorList, c10::optional>, const c10::optional &, bool) + schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::TensorList, ::std::optional>, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -100613,17 +107004,17 @@ name: bins type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -101932,6 +108323,55 @@ with_gil: false deprecated: false has_math_kernel: false +- name: min_out + operator_name: min + overload_name: unary_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
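
Note: the first added entry in this stretch is a brand-new operator, aten::nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor. Unlike aten::nonzero, whose output shape depends on the data, it always returns `size` rows, padding with `fill_value` when there are fewer nonzeros, which makes it usable where static shapes are required (e.g. export). A minimal sketch, assuming a recent libtorch that generates at::nonzero_static from this schema (the wrapper name is illustrative):

    #include <ATen/ATen.h>

    at::Tensor first_k_nonzero(const at::Tensor& mask, int64_t k) {
      // Always returns a k x mask.dim() index tensor; rows beyond the actual
      // nonzero count are filled with -1, the declared default fill_value.
      return at::nonzero_static(mask, /*size=*/k, /*fill_value=*/-1);
    }
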
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: fmin operator_name: fmin overload_name: '' @@ -102665,11 +109105,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -102683,7 +109123,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional, bool, c10::string_view) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional, bool, c10::string_view) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -102696,11 +109136,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -102756,11 +109196,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -102774,7 +109214,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, bool, c10::string_view, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, bool, c10::string_view, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -102787,11 +109227,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -102846,11 +109286,11 @@ name: q type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -102864,7 +109304,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional, bool, c10::string_view) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, ::std::optional, bool, c10::string_view) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -102877,11 +109317,11 @@ name: q type: double - annotation: null - default: 
c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -102937,11 +109377,11 @@ name: q type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -102955,7 +109395,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional, bool, c10::string_view, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, ::std::optional, bool, c10::string_view, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -102968,11 +109408,11 @@ name: q type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103027,11 +109467,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103045,7 +109485,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional, bool, c10::string_view) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional, bool, c10::string_view) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -103058,11 +109498,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103118,11 +109558,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103136,7 +109576,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, bool, c10::string_view, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, bool, c10::string_view, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -103149,11 +109589,11 @@ name: q type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103208,11 +109648,11 @@ name: q type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103226,7 +109666,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional, bool, c10::string_view) + 
schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, ::std::optional, bool, c10::string_view) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -103239,11 +109679,11 @@ name: q type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103299,11 +109739,11 @@ name: q type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103317,7 +109757,7 @@ kwarg_only: true name: interpolation type: c10::string_view - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional, bool, c10::string_view, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, ::std::optional, bool, c10::string_view, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -103330,11 +109770,11 @@ name: q type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -103500,7 +109940,7 @@ is_nullable: true kwarg_only: true name: stable - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -103515,7 +109955,7 @@ kwarg_only: true name: descending type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, int64_t, bool, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, ::std::optional, int64_t, bool, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -103527,7 +109967,7 @@ is_nullable: true kwarg_only: true name: stable - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -103661,7 +110101,7 @@ is_nullable: true kwarg_only: true name: stable - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -103676,7 +110116,7 @@ kwarg_only: true name: descending type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, int64_t, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, ::std::optional, int64_t, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -103688,7 +110128,7 @@ is_nullable: true kwarg_only: true name: stable - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -103852,7 +110292,7 @@ is_nullable: true kwarg_only: true name: stable - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::Dimname is_nullable: false @@ -103866,7 +110306,7 @@ kwarg_only: true name: descending type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, at::Dimname, bool, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, ::std::optional, at::Dimname, bool, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -103878,7 +110318,7 @@ is_nullable: true kwarg_only: true name: stable - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::Dimname is_nullable: false @@ 
-104009,7 +110449,7 @@ is_nullable: true kwarg_only: true name: stable - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::Dimname is_nullable: false @@ -104023,7 +110463,7 @@ kwarg_only: true name: descending type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, at::Dimname, bool) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, ::std::optional, at::Dimname, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -104035,7 +110475,7 @@ is_nullable: true kwarg_only: true name: stable - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::Dimname is_nullable: false @@ -104287,11 +110727,100 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: true + abstract: false device_guard: true with_gil: false deprecated: false - has_math_kernel: false + has_math_kernel: true +- name: argsort_out + operator_name: argsort + overload_name: stable_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: stable + type: bool + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: descending + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, int64_t, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: stable + type: bool + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: descending + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true - name: argsort operator_name: argsort overload_name: dimname @@ -104355,7 +110884,7 @@ overload_name: values manual_kernel_registration: false category_override: '' - schema_string: aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + schema_string: aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) arguments: - allocate: true annotation: a! 
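
Note: two things happen in the hunks around here. First, argsort flips from abstract: true / has_math_kernel: false to abstract: false / has_math_kernel: true, and a new out= overload is declared, aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out), much as an out= variant of the full-reduction min (aten::min.unary_out) was added a few hunks earlier. Second, topk's `k` is re-declared as SymInt, which in the hunks shown touches only the schema strings. A sketch of the new out variant, assuming a recent libtorch where codegen emits at::argsort_out with the declared signature (the wrapper name is illustrative):

    #include <ATen/ATen.h>

    at::Tensor stable_order(const at::Tensor& x) {
      // Index output must be int64; stable=true preserves the relative order
      // of equal elements, per the schema above.
      at::Tensor out = at::empty_like(x, x.options().dtype(at::kLong));
      return at::argsort_out(out, x, /*stable=*/true, /*dim=*/-1,
                             /*descending=*/false);
    }
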
@@ -104473,7 +111002,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) + schema_string: aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) arguments: - annotation: null dynamic_type: at::Tensor @@ -105954,13 +112483,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: a! dynamic_type: at::Tensor @@ -105980,12 +112509,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - Tensor @@ -106027,13 +112556,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -106053,12 +112582,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -106101,13 +112630,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -106121,12 +112650,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -106169,13 +112698,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -106189,12 +112718,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -106236,13 +112765,13 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (double, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (double, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: double @@ -106255,12 +112784,12 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -106302,13 +112831,13 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (double, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (double, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: double @@ -106321,12 +112850,12 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -106368,13 +112897,13 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -106387,12 +112916,12 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -106434,13 +112963,13 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -106453,12 +112982,12 @@ name: std type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -106498,12 +113027,12 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null default: '{}' dynamic_type: at::TensorOptions @@ -106511,7 +113040,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (double, double, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (double, double, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: double @@ -106529,40 +113058,40 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -106609,13 +113138,13 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (double, double, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (double, double, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: double @@ -106633,12 +113162,12 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
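
Note: the long mechanical run through here (and through most of this file) swaps c10::optional / c10::nullopt for ::std::optional / ::std::nullopt in every nullable argument and signature, following upstream PyTorch's move to std::optional. In recent libtorch releases c10::optional remains as an alias of std::optional, so existing call sites keep compiling. Spelled out against the normal overloads above (a minimal sketch; the function name is illustrative):

    #include <ATen/ATen.h>
    #include <optional>

    at::Tensor noisy(const at::Tensor& mean, const at::Tensor& std) {
      // generator is declared as ::std::optional<at::Generator>; std::nullopt
      // selects the default generator exactly as c10::nullopt used to.
      return at::normal(mean, std, ::std::nullopt);
    }
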
dynamic_type: at::Tensor @@ -106922,12 +113451,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sub - operator_name: _foreach_sub - overload_name: Scalar +- name: _foreach_add + operator_name: _foreach_add + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + schema_string: aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -106935,11 +113464,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -106947,9 +113483,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & method_of: - Type @@ -106967,12 +113510,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sub_ - operator_name: _foreach_sub_ - overload_name: Scalar +- name: _foreach_add_ + operator_name: _foreach_add_ + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + schema_string: aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -106980,11 +113523,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) + schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -106992,9 +113542,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & method_of: - Type @@ -107009,12 +113566,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_mul - operator_name: _foreach_mul - overload_name: Scalar +- name: _foreach_add + operator_name: _foreach_add + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + schema_string: aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107022,11 +113579,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & - schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107034,10 +113591,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -107054,12 +113611,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_mul_ - operator_name: _foreach_mul_ - overload_name: Scalar +- name: _foreach_add_ + operator_name: _foreach_add_ + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + schema_string: aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107067,11 +113624,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -107079,10 +113636,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -107096,12 +113653,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_div - operator_name: _foreach_div - overload_name: Scalar +- name: _foreach_add + operator_name: _foreach_add + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + schema_string: aten::_foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107109,11 +113666,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Tensor &, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107121,9 +113685,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & method_of: - Type @@ -107141,12 +113712,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_div_ - operator_name: _foreach_div_ - overload_name: Scalar +- name: _foreach_add_ + operator_name: _foreach_add_ + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + schema_string: aten::_foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107154,11 +113725,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) + schema_order_cpp_signature: void (at::TensorList, const at::Tensor &, const at::Scalar &) schema_order_arguments: - annotation: a! 
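
Note: the _foreach_* block is reshuffled rather than merely edited: the Scalar overloads of _foreach_sub / _foreach_mul / _foreach_div that used to sit at this position move further down, and _foreach_add gains List, ScalarList, and Tensor overloads in their place. The List variant above carries the usual alpha multiplier. A minimal sketch, assuming a recent libtorch (the function name is illustrative):

    #include <ATen/ATen.h>
    #include <vector>

    std::vector<at::Tensor> axpy_all(at::TensorList xs, at::TensorList ys) {
      // xs[i] + 2 * ys[i] for every pair, fused into grouped foreach kernels
      // instead of one at::add launch per tensor.
      return at::_foreach_add(xs, ys, /*alpha=*/2);
    }
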
dynamic_type: at::TensorList @@ -107166,9 +113744,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & method_of: - Type @@ -107183,12 +113768,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_min - operator_name: _foreach_clamp_min +- name: _foreach_sub + operator_name: _foreach_sub overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + schema_string: aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107228,12 +113813,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_min_ - operator_name: _foreach_clamp_min_ +- name: _foreach_sub_ + operator_name: _foreach_sub_ overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + schema_string: aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107270,12 +113855,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max - operator_name: _foreach_clamp_max - overload_name: Scalar +- name: _foreach_sub + operator_name: _foreach_sub + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + schema_string: aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107283,11 +113868,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107295,9 +113887,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & method_of: - Type @@ -107315,12 +113914,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max_ - operator_name: _foreach_clamp_max_ - overload_name: Scalar +- name: _foreach_sub_ + operator_name: _foreach_sub_ + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + schema_string: aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -107328,11 +113927,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) + schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! dynamic_type: at::TensorList @@ -107340,9 +113946,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & method_of: - Type @@ -107357,12 +113970,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_maximum - operator_name: _foreach_maximum - overload_name: Scalar +- name: _foreach_sub + operator_name: _foreach_sub + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + schema_string: aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107370,11 +113983,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & - schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107382,10 +113995,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -107402,12 +114015,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_maximum_ - operator_name: _foreach_maximum_ - overload_name: Scalar +- name: _foreach_sub_ + operator_name: _foreach_sub_ + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + schema_string: aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107415,11 +114028,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -107427,10 +114040,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -107444,12 +114057,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum - operator_name: _foreach_minimum +- name: _foreach_mul + operator_name: _foreach_mul overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + schema_string: aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107489,12 +114102,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum_ - operator_name: _foreach_minimum_ +- name: _foreach_mul_ + operator_name: _foreach_mul_ overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + schema_string: aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107531,12 +114144,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_add - operator_name: _foreach_add +- name: _foreach_mul + operator_name: _foreach_mul overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] + schema_string: aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107548,14 +114161,7 @@ is_nullable: false name: other type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & - is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & - schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, const at::Scalar &) + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107567,13 +114173,6 @@ is_nullable: false name: other type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & - is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & method_of: - Type - namespace @@ -107590,12 +114189,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_add_ - operator_name: _foreach_add_ +- name: _foreach_mul_ + operator_name: _foreach_mul_ overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () + schema_string: aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107607,14 +114206,7 @@ is_nullable: false name: other type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & - is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &) + schema_order_cpp_signature: void (at::TensorList, at::TensorList) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -107626,13 +114218,6 @@ is_nullable: false name: other type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & - is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & method_of: - Type - namespace @@ -107646,12 +114231,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sub - operator_name: _foreach_sub - overload_name: List +- name: _foreach_mul + operator_name: _foreach_mul + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] + schema_string: aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107659,18 +114244,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: other - type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & - schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, const at::Scalar &) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107678,17 +114256,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: other - type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -107705,12 +114276,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sub_ - operator_name: _foreach_sub_ - overload_name: List +- name: _foreach_mul_ + operator_name: _foreach_mul_ + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () + schema_string: aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107718,18 +114289,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: other - type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -107737,17 +114301,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: other - type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -107763,10 +114320,10 @@ has_math_kernel: false - name: _foreach_mul operator_name: _foreach_mul - overload_name: List + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[] + schema_string: aten::_foreach_mul.Tensor(Tensor[] self, Tensor other) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107774,11 +114331,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + type: const at::Tensor & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107786,10 +114343,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList + type: const at::Tensor & method_of: - Type - namespace @@ -107808,10 +114365,10 @@ has_math_kernel: false - name: _foreach_mul_ operator_name: _foreach_mul_ - overload_name: List + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> () + schema_string: aten::_foreach_mul_.Tensor(Tensor(a!)[] self, Tensor other) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107819,11 +114376,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, const at::Tensor &) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -107831,10 +114388,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList + type: const at::Tensor & method_of: - Type - namespace @@ -107850,10 +114407,10 @@ has_math_kernel: false - name: _foreach_div operator_name: _foreach_div - overload_name: List + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[] + schema_string: aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107861,11 +114418,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -107873,10 +114430,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -107895,10 +114452,10 @@ has_math_kernel: false - name: _foreach_div_ operator_name: _foreach_div_ - overload_name: List + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> () + schema_string: aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -107906,11 +114463,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! 
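The hunks above swap the removed `_foreach_sub.List` entries for regenerated `ScalarList` and `Tensor` overloads of `_foreach_mul`. As a minimal sketch of what these schemas correspond to on the libtorch side (function names are taken from the `schema_string` fields above; the tensor values and the assumption that a recent libtorch build ships these overloads are mine):

```cpp
#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<at::Tensor> xs = {at::ones({2}), at::ones({3})};

  // ScalarList overload: one scalar per tensor in the list.
  std::vector<at::Scalar> ss = {2.0, 3.0};
  std::vector<at::Tensor> a = at::_foreach_mul(xs, ss);

  // Tensor overload: every list element is multiplied by the same
  // (typically 0-dim) tensor.
  at::Tensor w = at::scalar_tensor(0.5);
  std::vector<at::Tensor> b = at::_foreach_mul(xs, w);

  // In-place variants mutate `xs` and return void.
  at::_foreach_mul_(xs, ss);
  return 0;
}
```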
dynamic_type: at::TensorList @@ -107918,10 +114475,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -107935,12 +114492,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_min - operator_name: _foreach_clamp_min +- name: _foreach_div + operator_name: _foreach_div overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[] + schema_string: aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -107980,12 +114537,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_min_ - operator_name: _foreach_clamp_min_ +- name: _foreach_div_ + operator_name: _foreach_div_ overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> () + schema_string: aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108022,12 +114579,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max - operator_name: _foreach_clamp_max - overload_name: List +- name: _foreach_div + operator_name: _foreach_div + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[] + schema_string: aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108035,11 +114592,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::ArrayRef is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108047,10 +114604,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::ArrayRef is_nullable: false - name: other - type: at::TensorList + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -108067,12 +114624,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max_ - operator_name: _foreach_clamp_max_ - overload_name: List +- name: _foreach_div_ + operator_name: _foreach_div_ + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> () + schema_string: aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108080,11 +114637,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::ArrayRef is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -108092,10 +114649,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::ArrayRef is_nullable: false - name: other - type: at::TensorList + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -108109,12 +114666,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_maximum - operator_name: _foreach_maximum - overload_name: List +- name: _foreach_div + operator_name: _foreach_div + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[] + schema_string: aten::_foreach_div.Tensor(Tensor[] self, Tensor other) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108122,11 +114679,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + type: const at::Tensor & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108134,10 +114691,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList + type: const at::Tensor & method_of: - Type - namespace @@ -108154,12 +114711,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_maximum_ - operator_name: _foreach_maximum_ - overload_name: List +- name: _foreach_div_ + operator_name: _foreach_div_ + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> () + schema_string: aten::_foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108167,11 +114724,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, const at::Tensor &) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -108179,10 +114736,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList + type: const at::Tensor & method_of: - Type - namespace @@ -108196,12 +114753,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum - operator_name: _foreach_minimum - overload_name: List +- name: _foreach_clamp_max + operator_name: _foreach_clamp_max + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[] + schema_string: aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108209,11 +114766,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108221,10 +114778,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -108241,12 +114798,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum_ - operator_name: _foreach_minimum_ - overload_name: List +- name: _foreach_clamp_max_ + operator_name: _foreach_clamp_max_ + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> () + schema_string: aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108254,11 +114811,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! 
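With the hunks up to this point, `_foreach_div` carries the full overload set: Scalar, List, ScalarList, and Tensor. A rough sketch of the four call shapes, assuming the same libtorch build as above (helper name and values are illustrative only):

```cpp
#include <ATen/ATen.h>
#include <vector>

void div_overloads() {
  std::vector<at::Tensor> xs = {at::ones({2}), at::ones({2})};
  std::vector<at::Tensor> ys = {at::full({2}, 2.0), at::full({2}, 4.0)};
  std::vector<at::Scalar> ss = {2.0, 4.0};

  auto by_scalar  = at::_foreach_div(xs, /*scalar=*/2.0);          // Scalar
  auto by_list    = at::_foreach_div(xs, ys);                      // List
  auto by_scalars = at::_foreach_div(xs, ss);                      // ScalarList
  auto by_tensor  = at::_foreach_div(xs, at::scalar_tensor(2.0));  // Tensor
}
```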
dynamic_type: at::TensorList @@ -108266,10 +114823,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -108283,12 +114840,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_add - operator_name: _foreach_add - overload_name: ScalarList +- name: _foreach_clamp_max + operator_name: _foreach_clamp_max + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + schema_string: aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108296,11 +114853,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) + name: other + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108308,10 +114865,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList method_of: - Type - namespace @@ -108328,12 +114885,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_add_ - operator_name: _foreach_add_ - overload_name: ScalarList +- name: _foreach_clamp_max_ + operator_name: _foreach_clamp_max_ + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + schema_string: aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108341,11 +114898,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -108353,10 +114910,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList method_of: - Type - namespace @@ -108370,12 +114927,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sub - operator_name: _foreach_sub +- name: _foreach_clamp_max + operator_name: _foreach_clamp_max overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + schema_string: aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108415,12 +114972,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sub_ - operator_name: _foreach_sub_ +- name: _foreach_clamp_max_ + operator_name: _foreach_clamp_max_ overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + schema_string: aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108457,12 +115014,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_div - operator_name: _foreach_div - overload_name: ScalarList +- name: _foreach_clamp_min + operator_name: _foreach_clamp_min + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + schema_string: aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108470,11 +115027,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108482,10 +115039,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -108502,12 +115059,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_div_ - operator_name: _foreach_div_ - overload_name: ScalarList +- name: _foreach_clamp_min_ + operator_name: _foreach_clamp_min_ + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + schema_string: aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -108515,11 +115072,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! dynamic_type: at::TensorList @@ -108527,10 +115084,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -108544,12 +115101,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_mul - operator_name: _foreach_mul - overload_name: ScalarList +- name: _foreach_clamp_min + operator_name: _foreach_clamp_min + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + schema_string: aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108557,11 +115114,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) + name: other + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108569,10 +115126,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList method_of: - Type - namespace @@ -108589,12 +115146,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_mul_ - operator_name: _foreach_mul_ - overload_name: ScalarList +- name: _foreach_clamp_min_ + operator_name: _foreach_clamp_min_ + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + schema_string: aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108602,11 +115159,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -108614,10 +115171,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList method_of: - Type - namespace @@ -108718,12 +115275,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max - operator_name: _foreach_clamp_max - overload_name: ScalarList +- name: _foreach_maximum + operator_name: _foreach_maximum + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + schema_string: aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108731,11 +115288,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108743,10 +115300,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -108763,12 +115320,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max_ - operator_name: _foreach_clamp_max_ - overload_name: ScalarList +- name: _foreach_maximum_ + operator_name: _foreach_maximum_ + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + schema_string: aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108776,11 +115333,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! 
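The regenerated file likewise gains Scalar, List, and ScalarList overloads for `_foreach_clamp_max` and `_foreach_clamp_min`. A minimal sketch of the new entry points (values invented; names from the schemas above):

```cpp
#include <ATen/ATen.h>
#include <vector>

void clamp_overloads() {
  std::vector<at::Tensor> xs = {at::randn({4}), at::randn({4})};

  auto capped  = at::_foreach_clamp_max(xs, /*scalar=*/1.0);   // upper bound
  auto floored = at::_foreach_clamp_min(xs, /*scalar=*/-1.0);  // lower bound

  // List variant clamps element-wise against a second tensor list.
  std::vector<at::Tensor> hi = {at::ones({4}), at::ones({4})};
  auto capped2 = at::_foreach_clamp_max(xs, hi);

  at::_foreach_clamp_min_(xs, 0.0);  // in-place, mutates xs
}
```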
dynamic_type: at::TensorList @@ -108788,10 +115345,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -108807,10 +115364,10 @@ has_math_kernel: false - name: _foreach_maximum operator_name: _foreach_maximum - overload_name: ScalarList + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + schema_string: aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108818,11 +115375,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) + name: other + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -108830,10 +115387,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList method_of: - Type - namespace @@ -108852,10 +115409,10 @@ has_math_kernel: false - name: _foreach_maximum_ operator_name: _foreach_maximum_ - overload_name: ScalarList + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + schema_string: aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108863,11 +115420,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -108875,10 +115432,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList method_of: - Type - namespace @@ -108892,12 +115449,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum - operator_name: _foreach_minimum +- name: _foreach_maximum + operator_name: _foreach_maximum overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + schema_string: aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -108937,12 +115494,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum_ - operator_name: _foreach_minimum_ +- name: _foreach_maximum_ + operator_name: _foreach_maximum_ overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + schema_string: aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -108979,25 +115536,35 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_exp - operator_name: _foreach_exp - overload_name: '' +- name: _foreach_minimum + operator_name: _foreach_minimum + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_exp(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList) + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -109014,25 +115581,35 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_zero_ - operator_name: _foreach_zero_ - overload_name: '' +- name: _foreach_minimum_ + operator_name: _foreach_minimum_ + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_zero_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () arguments: - annotation: a! dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList) + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & method_of: - Type - namespace @@ -109046,56 +115623,34 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_exp_ - operator_name: _foreach_exp_ - overload_name: '' +- name: _foreach_minimum + operator_name: _foreach_minimum + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_exp_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[] arguments: - - annotation: a! + - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList) - schema_order_arguments: - - annotation: a! + - annotation: null dynamic_type: at::TensorList is_nullable: false - name: self + name: other type: at::TensorList - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: [] - inplace: true - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: _foreach_sqrt - operator_name: _foreach_sqrt - overload_name: '' - manual_kernel_registration: false - category_override: '' - schema_string: aten::_foreach_sqrt(Tensor[] self) -> Tensor[] - arguments: + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList) - schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: self + name: other type: at::TensorList method_of: - Type @@ -109113,25 +115668,35 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sqrt_ - operator_name: _foreach_sqrt_ - overload_name: '' +- name: _foreach_minimum_ + operator_name: _foreach_minimum_ + overload_name: List manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sqrt_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> () arguments: - annotation: a! dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList method_of: - Type - namespace @@ -109145,25 +115710,35 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_abs - operator_name: _foreach_abs - overload_name: '' +- name: _foreach_minimum + operator_name: _foreach_minimum + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_abs(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList) + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -109180,25 +115755,35 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_abs_ - operator_name: _foreach_abs_ - overload_name: '' +- name: _foreach_minimum_ + operator_name: _foreach_minimum_ + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_abs_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList) + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef method_of: - Type - namespace @@ -109212,25 +115797,57 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_acos - operator_name: _foreach_acos - overload_name: '' +- name: _foreach_addcdiv + operator_name: _foreach_addcdiv + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_acos(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & method_of: - Type - namespace @@ -109247,57 +115864,120 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_acos_ - operator_name: _foreach_acos_ - overload_name: '' +- name: _foreach_addcdiv + operator_name: _foreach_addcdiv + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_acos_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[] arguments: - - annotation: a! + - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef) schema_order_arguments: - - annotation: a! 
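`_foreach_maximum` and `_foreach_minimum` pick up the same three overloads; they behave like element-wise `maximum`/`minimum` applied across a whole tensor list. A small sketch under the same assumptions:

```cpp
#include <ATen/ATen.h>
#include <vector>

void minmax_overloads() {
  std::vector<at::Tensor> xs = {at::randn({3}), at::randn({3})};
  std::vector<at::Tensor> ys = {at::zeros({3}), at::zeros({3})};

  auto m1 = at::_foreach_maximum(xs, /*scalar=*/0.0);  // max(x_i, 0) per slot
  auto m2 = at::_foreach_minimum(xs, ys);              // min(x_i, y_i) element-wise

  std::vector<at::Scalar> caps = {1.0, 2.0};
  at::_foreach_minimum_(xs, caps);                     // in-place ScalarList variant
}
```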
+ - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef method_of: - Type - namespace mode: native python_module: '' - returns: [] - inplace: true + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false is_factory_method: false abstract: true device_guard: true with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_asin - operator_name: _foreach_asin - overload_name: '' +- name: _foreach_addcdiv + operator_name: _foreach_addcdiv + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_asin(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, at::TensorList, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & method_of: - Type - namespace @@ -109314,25 +115994,57 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_asin_ - operator_name: _foreach_asin_ - overload_name: '' +- name: _foreach_addcdiv_ + operator_name: _foreach_addcdiv_ + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_asin_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () arguments: - annotation: a! dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & method_of: - Type - namespace @@ -109346,60 +116058,117 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_atan - operator_name: _foreach_atan - overload_name: '' +- name: _foreach_addcdiv_ + operator_name: _foreach_addcdiv_ + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_atan(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> () arguments: - - annotation: null + - annotation: a! dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList) - schema_order_arguments: - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef) + schema_order_arguments: + - annotation: a! dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef method_of: - Type - namespace mode: native python_module: '' - returns: - - dynamic_type: at::TensorList - name: result - type: ::std::vector - inplace: false + returns: [] + inplace: true is_factory_method: false abstract: true device_guard: true with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_atan_ - operator_name: _foreach_atan_ - overload_name: '' +- name: _foreach_addcdiv_ + operator_name: _foreach_addcdiv_ + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_atan_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Tensor &) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & method_of: - Type - namespace @@ -109413,25 +116182,57 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_ceil - operator_name: _foreach_ceil - overload_name: '' +- name: _foreach_addcmul + operator_name: _foreach_addcmul + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_ceil(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & method_of: - Type - namespace @@ -109448,57 +116249,120 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_ceil_ - operator_name: _foreach_ceil_ - overload_name: '' +- name: _foreach_addcmul + operator_name: _foreach_addcmul + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_ceil_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[] arguments: - - annotation: a! + - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef) schema_order_arguments: - - annotation: a! 
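`_foreach_addcdiv` now exposes Scalar, ScalarList, and Tensor forms; each computes `self[i] + value * tensor1[i] / tensor2[i]` per list slot, with the Tensor form reading one multiplier per slot out of a 1-D `scalars` tensor (typically on CPU). A hedged sketch, with made-up operands:

```cpp
#include <ATen/ATen.h>
#include <vector>

void addcdiv_overloads() {
  std::vector<at::Tensor> self = {at::ones({2}), at::ones({2})};
  std::vector<at::Tensor> t1   = {at::full({2}, 4.0), at::full({2}, 6.0)};
  std::vector<at::Tensor> t2   = {at::full({2}, 2.0), at::full({2}, 3.0)};

  auto r1 = at::_foreach_addcdiv(self, t1, t2, /*value=*/1.0);  // Scalar form

  std::vector<at::Scalar> vs = {0.5, 2.0};
  auto r2 = at::_foreach_addcdiv(self, t1, t2, vs);             // ScalarList form

  // Tensor form: one multiplier per list slot, as a 1-D tensor.
  auto r3 = at::_foreach_addcdiv(self, t1, t2,
                                 at::tensor(at::ArrayRef<double>({0.5, 2.0})));

  at::_foreach_addcdiv_(self, t1, t2, 1.0);                     // in-place
}
```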
+ - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef method_of: - Type - namespace mode: native python_module: '' - returns: [] - inplace: true + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false is_factory_method: false abstract: true device_guard: true with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_cos - operator_name: _foreach_cos - overload_name: '' +- name: _foreach_addcmul + operator_name: _foreach_addcmul + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_cos(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, at::TensorList, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & method_of: - Type - namespace @@ -109515,25 +116379,57 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_cos_ - operator_name: _foreach_cos_ - overload_name: '' +- name: _foreach_addcmul_ + operator_name: _foreach_addcmul_ + overload_name: Scalar manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_cos_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () arguments: - annotation: a! dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & method_of: - Type - namespace @@ -109547,60 +116443,117 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_cosh - operator_name: _foreach_cosh - overload_name: '' +- name: _foreach_addcmul_ + operator_name: _foreach_addcmul_ + overload_name: ScalarList manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_cosh(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> () arguments: - - annotation: null + - annotation: a! dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList) - schema_order_arguments: - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef) + schema_order_arguments: + - annotation: a! dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef method_of: - Type - namespace mode: native python_module: '' - returns: - - dynamic_type: at::TensorList - name: result - type: ::std::vector - inplace: false + returns: [] + inplace: true is_factory_method: false abstract: true device_guard: true with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_cosh_ - operator_name: _foreach_cosh_ - overload_name: '' +- name: _foreach_addcmul_ + operator_name: _foreach_addcmul_ + overload_name: Tensor manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_cosh_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> () arguments: - annotation: a! dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Tensor &) schema_order_arguments: - annotation: a! 
dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & method_of: - Type - namespace @@ -109614,12 +116567,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_erf - operator_name: _foreach_erf +- name: _foreach_abs + operator_name: _foreach_abs overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_erf(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_abs(Tensor[] self) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -109649,12 +116602,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_erf_ - operator_name: _foreach_erf_ +- name: _foreach_abs_ + operator_name: _foreach_abs_ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_erf_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_abs_(Tensor(a!)[] self) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -109681,12 +116634,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_erfc - operator_name: _foreach_erfc +- name: _foreach_acos + operator_name: _foreach_acos overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_erfc(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_acos(Tensor[] self) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -109716,12 +116669,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_erfc_ - operator_name: _foreach_erfc_ +- name: _foreach_acos_ + operator_name: _foreach_acos_ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_erfc_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_acos_(Tensor(a!)[] self) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -109748,12 +116701,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_expm1 - operator_name: _foreach_expm1 +- name: _foreach_asin + operator_name: _foreach_asin overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_expm1(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_asin(Tensor[] self) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -109783,12 +116736,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_expm1_ - operator_name: _foreach_expm1_ +- name: _foreach_asin_ + operator_name: _foreach_asin_ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_expm1_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_asin_(Tensor(a!)[] self) -> () arguments: - annotation: a! 
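The same three overloads are generated for `_foreach_addcmul` (`self[i] + value * tensor1[i] * tensor2[i]`), after which the diff turns to reshuffling the unary entries. A short sketch, same assumptions as the previous blocks:

```cpp
#include <ATen/ATen.h>
#include <vector>

void addcmul_overloads() {
  std::vector<at::Tensor> self = {at::zeros({2}), at::zeros({2})};
  std::vector<at::Tensor> t1   = {at::ones({2}),  at::ones({2})};
  std::vector<at::Tensor> t2   = {at::full({2}, 3.0), at::full({2}, 5.0)};

  auto r = at::_foreach_addcmul(self, t1, t2, /*value=*/2.0);  // Scalar form

  std::vector<at::Scalar> vs = {1.0, -1.0};
  at::_foreach_addcmul_(self, t1, t2, vs);                     // in-place ScalarList
}
```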
dynamic_type: at::TensorList @@ -109815,12 +116768,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_floor - operator_name: _foreach_floor +- name: _foreach_atan + operator_name: _foreach_atan overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_floor(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_atan(Tensor[] self) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -109850,12 +116803,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_floor_ - operator_name: _foreach_floor_ +- name: _foreach_atan_ + operator_name: _foreach_atan_ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_floor_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_atan_(Tensor(a!)[] self) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -109882,12 +116835,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_log - operator_name: _foreach_log +- name: _foreach_ceil + operator_name: _foreach_ceil overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_log(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_ceil(Tensor[] self) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -109917,12 +116870,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_log_ - operator_name: _foreach_log_ +- name: _foreach_ceil_ + operator_name: _foreach_ceil_ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_log_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_ceil_(Tensor(a!)[] self) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -109949,12 +116902,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_log10 - operator_name: _foreach_log10 +- name: _foreach_cos + operator_name: _foreach_cos overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_log10(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_cos(Tensor[] self) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -109984,12 +116937,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_log10_ - operator_name: _foreach_log10_ +- name: _foreach_cos_ + operator_name: _foreach_cos_ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_log10_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_cos_(Tensor(a!)[] self) -> () arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -110016,12 +116969,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_log1p - operator_name: _foreach_log1p +- name: _foreach_cosh + operator_name: _foreach_cosh overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_log1p(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_cosh(Tensor[] self) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -110051,12 +117004,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_log1p_ - operator_name: _foreach_log1p_ +- name: _foreach_cosh_ + operator_name: _foreach_cosh_ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_log1p_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_cosh_(Tensor(a!)[] self) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -110083,12 +117036,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_log2 - operator_name: _foreach_log2 +- name: _foreach_erf + operator_name: _foreach_erf overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_log2(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_erf(Tensor[] self) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -110118,12 +117071,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_log2_ - operator_name: _foreach_log2_ +- name: _foreach_erf_ + operator_name: _foreach_erf_ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_log2_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_erf_(Tensor(a!)[] self) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -110150,12 +117103,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_neg - operator_name: _foreach_neg +- name: _foreach_erfc + operator_name: _foreach_erfc overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_neg(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_erfc(Tensor[] self) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -110185,12 +117138,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_neg_ - operator_name: _foreach_neg_ +- name: _foreach_erfc_ + operator_name: _foreach_erfc_ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_neg_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_erfc_(Tensor(a!)[] self) -> () arguments: - annotation: a! 
dynamic_type: at::TensorList @@ -110217,12 +117170,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_tan - operator_name: _foreach_tan +- name: _foreach_exp + operator_name: _foreach_exp overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_tan(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_exp(Tensor[] self) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -110252,12 +117205,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_tan_ - operator_name: _foreach_tan_ +- name: _foreach_exp_ + operator_name: _foreach_exp_ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_tan_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_exp_(Tensor(a!)[] self) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -110284,12 +117237,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_tanh - operator_name: _foreach_tanh +- name: _foreach_expm1 + operator_name: _foreach_expm1 overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_tanh(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_expm1(Tensor[] self) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -110319,12 +117272,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_tanh_ - operator_name: _foreach_tanh_ +- name: _foreach_expm1_ + operator_name: _foreach_expm1_ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_tanh_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_expm1_(Tensor(a!)[] self) -> () arguments: - annotation: a! dynamic_type: at::TensorList @@ -110351,12 +117304,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sin - operator_name: _foreach_sin +- name: _foreach_floor + operator_name: _foreach_floor overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sin(Tensor[] self) -> Tensor[] + schema_string: aten::_foreach_floor(Tensor[] self) -> Tensor[] arguments: - annotation: null dynamic_type: at::TensorList @@ -110386,12 +117339,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sin_ - operator_name: _foreach_sin_ +- name: _foreach_floor_ + operator_name: _foreach_floor_ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sin_(Tensor(a!)[] self) -> () + schema_string: aten::_foreach_floor_(Tensor(a!)[] self) -> () arguments: - annotation: a! 
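None of the renames above change any operator: the regenerated file simply re-sorts the `_foreach_` unary block alphabetically, so every entry keeps the same two-schema shape, an out-of-place variant returning `Tensor[]` and an in-place `_` variant returning `()`. As a minimal sketch of how such a generated schema surfaces in LibTorch C++ (assuming a LibTorch build matching this declarations file; the example is illustrative, not part of the diff):

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::randn({2, 2});
  at::Tensor b = at::randn({3});
  // "aten::_foreach_abs(Tensor[] self) -> Tensor[]": one fused call over
  // the whole tensor list instead of a per-tensor loop.
  std::vector<at::Tensor> outs = at::_foreach_abs({a, b});
  // "aten::_foreach_abs_(Tensor(a!)[] self) -> ()": in-place variant.
  at::_foreach_abs_({a, b});
}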
@@ -110418,12 +117371,12 @@
-- name: _foreach_sinh
-  schema_string: aten::_foreach_sinh(Tensor[] self) -> Tensor[]
+- name: _foreach_frac
+  schema_string: aten::_foreach_frac(Tensor[] self) -> Tensor[]
@@ -110453,12 +117406,12 @@
-- name: _foreach_sinh_
-  schema_string: aten::_foreach_sinh_(Tensor(a!)[] self) -> ()
+- name: _foreach_frac_
+  schema_string: aten::_foreach_frac_(Tensor(a!)[] self) -> ()
@@ -110485,25 +117438,45 @@
-- name: _foreach_round
-  overload_name: ''
-  schema_string: aten::_foreach_round(Tensor[] self) -> Tensor[]
+- name: _foreach_lerp
+  overload_name: List
+  schema_string: aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
@@ -110520,25 +117493,45 @@
-- name: _foreach_round_
-  overload_name: ''
-  schema_string: aten::_foreach_round_(Tensor(a!)[] self) -> ()
+- name: _foreach_lerp_
+  overload_name: List
+  schema_string: aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
@@ -110552,25 +117545,45 @@
-- name: _foreach_lgamma
-  overload_name: ''
-  schema_string: aten::_foreach_lgamma(Tensor[] self) -> Tensor[]
+- name: _foreach_lerp
+  overload_name: Scalar
+  schema_string: aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
@@ -110587,25 +117600,45 @@
-- name: _foreach_lgamma_
-  overload_name: ''
-  schema_string: aten::_foreach_lgamma_(Tensor(a!)[] self) -> ()
+- name: _foreach_lerp_
+  overload_name: Scalar
+  schema_string: aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
@@ -110619,12 +117652,12 @@
-- name: _foreach_frac
-  schema_string: aten::_foreach_frac(Tensor[] self) -> Tensor[]
+- name: _foreach_lgamma
+  schema_string: aten::_foreach_lgamma(Tensor[] self) -> Tensor[]
@@ -110654,12 +117687,12 @@
-- name: _foreach_frac_
-  schema_string: aten::_foreach_frac_(Tensor(a!)[] self) -> ()
+- name: _foreach_lgamma_
+  schema_string: aten::_foreach_lgamma_(Tensor(a!)[] self) -> ()
@@ -110686,12 +117719,12 @@
-- name: _foreach_reciprocal
-  schema_string: aten::_foreach_reciprocal(Tensor[] self) -> Tensor[]
+- name: _foreach_log
+  schema_string: aten::_foreach_log(Tensor[] self) -> Tensor[]
@@ -110721,12 +117754,12 @@
-- name: _foreach_reciprocal_
-  schema_string: aten::_foreach_reciprocal_(Tensor(a!)[] self) -> ()
+- name: _foreach_log_
+  schema_string: aten::_foreach_log_(Tensor(a!)[] self) -> ()
@@ -110753,12 +117786,12 @@
-- name: _foreach_sigmoid
-  schema_string: aten::_foreach_sigmoid(Tensor[] self) -> Tensor[]
+- name: _foreach_log10
+  schema_string: aten::_foreach_log10(Tensor[] self) -> Tensor[]
@@ -110788,12 +117821,12 @@
-- name: _foreach_sigmoid_
-  schema_string: aten::_foreach_sigmoid_(Tensor(a!)[] self) -> ()
+- name: _foreach_log10_
+  schema_string: aten::_foreach_log10_(Tensor(a!)[] self) -> ()
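Unlike the renames, the `_foreach_lerp` entries above are new surface area at these offsets: the List overload takes one weight tensor per input, while the Scalar overload shares a single weight across the whole list. A sketch of the difference (same assumptions as above; `lerp_demo` is an illustrative name):

#include <ATen/ATen.h>

void lerp_demo(std::vector<at::Tensor>& xs,
               std::vector<at::Tensor>& ys,
               std::vector<at::Tensor>& ws) {
  // List overload: outs[i] = xs[i] + ws[i] * (ys[i] - xs[i]).
  std::vector<at::Tensor> outs = at::_foreach_lerp(xs, ys, ws);
  // Scalar overload, in-place: xs[i] += 0.5 * (ys[i] - xs[i]).
  at::_foreach_lerp_(xs, ys, /*weight=*/0.5);
}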
@@ -110820,12 +117853,12 @@
-- name: _foreach_trunc
-  schema_string: aten::_foreach_trunc(Tensor[] self) -> Tensor[]
+- name: _foreach_log1p
+  schema_string: aten::_foreach_log1p(Tensor[] self) -> Tensor[]
@@ -110855,12 +117888,12 @@
-- name: _foreach_trunc_
-  schema_string: aten::_foreach_trunc_(Tensor(a!)[] self) -> ()
+- name: _foreach_log1p_
+  schema_string: aten::_foreach_log1p_(Tensor(a!)[] self) -> ()
@@ -110887,121 +117920,60 @@
-- name: _foreach_addcdiv_
-  overload_name: Scalar
-  schema_string: aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
+- name: _foreach_log2
+  overload_name: ''
+  schema_string: aten::_foreach_log2(Tensor[] self) -> Tensor[]
-- name: _foreach_addcmul_
-  overload_name: Scalar
-  schema_string: aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
+- name: _foreach_log2_
+  overload_name: ''
+  schema_string: aten::_foreach_log2_(Tensor(a!)[] self) -> ()
@@ -111015,179 +117987,95 @@
-- name: _foreach_addcdiv_
-  overload_name: ScalarList
-  schema_string: aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
+- name: _foreach_max
+  overload_name: ''
+  schema_string: aten::_foreach_max(Tensor[] self) -> Tensor[]
-- name: _foreach_addcdiv_
-  overload_name: Tensor
-  schema_string: aten::_foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
+- name: _foreach_neg
+  overload_name: ''
+  schema_string: aten::_foreach_neg(Tensor[] self) -> Tensor[]
-- name: _foreach_addcmul_
-  overload_name: ScalarList
-  schema_string: aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
+- name: _foreach_neg_
+  overload_name: ''
+  schema_string: aten::_foreach_neg_(Tensor(a!)[] self) -> ()
@@ -111201,74 +118089,71 @@
-- name: _foreach_addcmul_
-  overload_name: Tensor
-  schema_string: aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
+- name: _foreach_norm
+  overload_name: Scalar
+  schema_string: aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2, ScalarType? dtype=None) -> Tensor[]
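`_foreach_norm.Scalar` is the one schema in this block that changes rather than merely moving: it gains an optional `dtype`, matching the optional `dtype` found on other norm reductions, and the regenerated file now spells optional defaults `::std::nullopt` where the old one used `c10::nullopt`. A sketch under the assumption that the matching LibTorch exposes the new argument (`grad_norms` is an illustrative name):

#include <ATen/ATen.h>

// Per-tensor L2 norms of a gradient list, accumulated in float32.
// The dtype argument is the one added by this regeneration
// ("Scalar ord=2, ScalarType? dtype=None" in the schema above).
std::vector<at::Tensor> grad_norms(at::TensorList grads) {
  return at::_foreach_norm(grads, /*ord=*/2, /*dtype=*/at::kFloat);
}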
-- name: _foreach_addcdiv
-  overload_name: Scalar
-  schema_string: aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
+- name: _foreach_pow
+  overload_name: List
+  schema_string: aten::_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]
@@ -111330,12 +118193,12 @@
-- name: _foreach_addcmul
-  overload_name: Scalar
-  schema_string: aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
+- name: _foreach_pow
+  overload_name: Scalar
+  schema_string: aten::_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]
@@ -111397,54 +118238,34 @@
-- name: _foreach_addcdiv
-  overload_name: ScalarList
-  schema_string: aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
+- name: _foreach_pow
+  overload_name: ScalarList
+  schema_string: aten::_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]
@@ -111462,55 +118283,35 @@
-- name: _foreach_addcdiv
-  overload_name: Tensor
-  schema_string: aten::_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
+- name: _foreach_pow
+  overload_name: ScalarAndTensor
+  schema_string: aten::_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]
@@ -111527,14 +118328,14 @@
-- name: _foreach_addcmul
-  overload_name: ScalarList
-  schema_string: aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
+- name: _foreach_pow_
+  overload_name: List
+  schema_string: aten::_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()
@@ -111564,130 +118355,124 @@
-- name: _foreach_addcmul
-  overload_name: Tensor
-  schema_string: aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
+- name: _foreach_pow_
+  overload_name: Scalar
+  schema_string: aten::_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> ()
+- name: _foreach_pow_
+  overload_name: ScalarList
+  schema_string: aten::_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> ()
-- name: _foreach_norm
-  overload_name: Scalar
-  schema_string: aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]
+- name: _foreach_reciprocal
+  overload_name: ''
+  schema_string: aten::_foreach_reciprocal(Tensor[] self) -> Tensor[]
@@ -111704,44 +118489,56 @@
-- name: _foreach_lerp
-  overload_name: List
-  schema_string: aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
+- name: _foreach_reciprocal_
+  overload_name: ''
+  schema_string: aten::_foreach_reciprocal_(Tensor(a!)[] self) -> ()
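`_foreach_pow` lands at the offsets previously occupied by the `_foreach_addcdiv`/`_foreach_addcmul` entries (which presumably reappear earlier in the alphabetized ordering) and comes in four functional overloads plus three in-place ones; ScalarAndTensor is the unusual one, taking a scalar base and a tensor-list exponent. A sketch (same assumptions; `pow_demo` is illustrative):

#include <ATen/ATen.h>

void pow_demo(std::vector<at::Tensor>& xs, std::vector<at::Tensor>& es) {
  auto a = at::_foreach_pow(xs, es);               // List: xs[i] ** es[i]
  auto b = at::_foreach_pow(xs, at::Scalar(2.0));  // Scalar: xs[i] ** 2
  auto c = at::_foreach_pow(at::Scalar(2.0), xs);  // ScalarAndTensor: 2 ** xs[i]
  at::_foreach_pow_(xs, at::Scalar(2.0));          // in-place Scalar overload
}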
+- name: _foreach_round
+  schema_string: aten::_foreach_round(Tensor[] self) -> Tensor[]
@@ -111759,44 +118556,91 @@
-- name: _foreach_lerp_
-  overload_name: List
-  schema_string: aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
+- name: _foreach_round_
+  overload_name: ''
+  schema_string: aten::_foreach_round_(Tensor(a!)[] self) -> ()
+- name: _foreach_sigmoid
+  schema_string: aten::_foreach_sigmoid(Tensor[] self) -> Tensor[]
+- name: _foreach_sigmoid_
+  schema_string: aten::_foreach_sigmoid_(Tensor(a!)[] self) -> ()
@@ -111811,45 +118655,25 @@
-- name: _foreach_lerp
-  overload_name: Scalar
-  schema_string: aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
+- name: _foreach_sign
+  overload_name: ''
+  schema_string: aten::_foreach_sign(Tensor[] self) -> Tensor[]
@@ -111866,45 +118690,25 @@
-- name: _foreach_lerp_
-  overload_name: Scalar
-  schema_string: aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
+- name: _foreach_sign_
+  overload_name: ''
+  schema_string: aten::_foreach_sign_(Tensor(a!)[] self) -> ()
@@ -111918,72 +118722,34 @@
-- name: bucketize
-  overload_name: Tensor
-  schema_string: aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
+- name: _foreach_sin
+  overload_name: ''
+  schema_string: aten::_foreach_sin(Tensor[] self) -> Tensor[]
@@ -111991,159 +118757,66 @@
-- name: bucketize_out
-  operator_name: bucketize
-  overload_name: Tensor_out
-  schema_string: aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
+- name: _foreach_sin_
+  overload_name: ''
+  schema_string: aten::_foreach_sin_(Tensor(a!)[] self) -> ()
+ dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: boundaries - type: const at::Tensor & - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: out_int32 - type: bool - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: right - type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &) + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor + - annotation: a! + dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: boundaries - type: const at::Tensor & - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: out_int32 - type: bool - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: right - type: bool - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & + type: at::TensorList method_of: - Type - namespace mode: native python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false + returns: [] + inplace: true is_factory_method: false abstract: true device_guard: true with_gil: false deprecated: false has_math_kernel: false -- name: bucketize - operator_name: bucketize - overload_name: Scalar +- name: _foreach_sinh + operator_name: _foreach_sinh + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor + schema_string: aten::_foreach_sinh(Tensor[] self) -> Tensor[] arguments: - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Scalar & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: boundaries - type: const at::Tensor & - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: out_int32 - type: bool - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: right - type: bool - schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &, bool, bool) + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) schema_order_arguments: - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Scalar & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: boundaries - type: const at::Tensor & - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: out_int32 - type: bool - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: right - type: bool + type: at::TensorList method_of: - Type - namespace mode: native python_module: '' returns: - - dynamic_type: at::Tensor + - dynamic_type: at::TensorList name: result - type: at::Tensor + type: ::std::vector inplace: false is_factory_method: false abstract: true @@ -112151,26 
+118824,470 @@ with_gil: false deprecated: false has_math_kernel: false -- name: searchsorted - operator_name: searchsorted - overload_name: Tensor +- name: _foreach_sinh_ + operator_name: _foreach_sinh_ + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor + schema_string: aten::_foreach_sinh_(Tensor(a!)[] self) -> () arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: sorted_sequence - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor + - annotation: a! + dynamic_type: at::TensorList is_nullable: false name: self - type: const at::Tensor & - - annotation: null - default: false - dynamic_type: bool + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sqrt + operator_name: _foreach_sqrt + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sqrt(Tensor[] self) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sqrt_ + operator_name: _foreach_sqrt_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sqrt_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_tan + operator_name: _foreach_tan + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_tan(Tensor[] self) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_tan_ + operator_name: _foreach_tan_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_tan_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_tanh + operator_name: _foreach_tanh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_tanh(Tensor[] self) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_tanh_ + operator_name: _foreach_tanh_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_tanh_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: self
+ type: at::TensorList
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns: []
+ inplace: true
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: _foreach_trunc
+ operator_name: _foreach_trunc
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::_foreach_trunc(Tensor[] self) -> Tensor[]
+ arguments:
+ - annotation: null
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: self
+ type: at::TensorList
+ schema_order_cpp_signature: ::std::vector<at::Tensor> (at::TensorList)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: self
+ type: at::TensorList
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::TensorList
+ name: result
+ type: ::std::vector<at::Tensor>
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: _foreach_trunc_
+ operator_name: _foreach_trunc_
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::_foreach_trunc_(Tensor(a!)[] self) -> ()
+ arguments:
+ - annotation: a!
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: self
+ type: at::TensorList
+ schema_order_cpp_signature: void (at::TensorList)
+ schema_order_arguments:
+ - annotation: a!
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: self
+ type: at::TensorList
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns: []
+ inplace: true
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: _foreach_zero_
+ operator_name: _foreach_zero_
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::_foreach_zero_(Tensor(a!)[] self) -> ()
+ arguments:
+ - annotation: a!
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: self
+ type: at::TensorList
+ schema_order_cpp_signature: void (at::TensorList)
+ schema_order_arguments:
+ - annotation: a!
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: self
+ type: at::TensorList
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns: []
+ inplace: true
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: _foreach_copy_
+ operator_name: _foreach_copy_
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::_foreach_copy_(Tensor(a!)[] self, Tensor[] src, bool non_blocking=False) -> ()
+ arguments:
+ - annotation: a!
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: self
+ type: at::TensorList
+ - annotation: null
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: src
+ type: at::TensorList
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: non_blocking
+ type: bool
+ schema_order_cpp_signature: void (at::TensorList, at::TensorList, bool)
+ schema_order_arguments:
+ - annotation: a!
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: self
+ type: at::TensorList
+ - annotation: null
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: src
+ type: at::TensorList
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: non_blocking
+ type: bool
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns: []
+ inplace: true
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: _foreach_copy
+ operator_name: _foreach_copy
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::_foreach_copy(Tensor[] self, Tensor[] src, bool non_blocking=False) -> Tensor[] self_out
+ arguments:
+ - annotation: null
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: self
+ type: at::TensorList
+ - annotation: null
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: src
+ type: at::TensorList
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: non_blocking
+ type: bool
+ schema_order_cpp_signature: ::std::vector<at::Tensor> (at::TensorList, at::TensorList, bool)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: self
+ type: at::TensorList
+ - annotation: null
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: src
+ type: at::TensorList
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: non_blocking
+ type: bool
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::TensorList
+ field_name: self_out
+ name: self_out
+ type: ::std::vector<at::Tensor>
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: bucketize
+ operator_name: bucketize
+ overload_name: Tensor
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: boundaries
+ type: const at::Tensor &
+ - annotation: null
+ default: false
+ dynamic_type: bool
 is_nullable: false
 kwarg_only: true
 name: out_int32
@@ -112182,21 +119299,254 @@ kwarg_only: true
 name: right
 type: bool
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool)
+ schema_order_arguments:
 - annotation: null
- default: c10::nullopt
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: boundaries
+ type: const at::Tensor &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: out_int32
+ type: bool
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: right
+ type: bool
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: bucketize_out
+ operator_name: bucketize
+ overload_name: Tensor_out
+
manual_kernel_registration: false + category_override: '' + schema_string: aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bucketize + operator_name: bucketize + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: searchsorted + 
operator_name: searchsorted
+ overload_name: Tensor
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: sorted_sequence
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: out_int32
+ type: bool
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: right
+ type: bool
+ - annotation: null
+ default: ::std::nullopt
 dynamic_type: c10::string_view
 is_nullable: true
 kwarg_only: true
 name: side
- type: c10::optional<c10::string_view>
+ type: ::std::optional<c10::string_view>
 - annotation: null
 default: '{}'
 dynamic_type: at::Tensor
 is_nullable: true
 kwarg_only: true
 name: sorter
- type: const c10::optional<at::Tensor> &
- schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, c10::optional<c10::string_view>, const c10::optional<at::Tensor> &)
+ type: const ::std::optional<at::Tensor> &
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, ::std::optional<c10::string_view>, const ::std::optional<at::Tensor> &)
 schema_order_arguments:
 - annotation: null
 dynamic_type: at::Tensor
 is_nullable: false
 name: sorted_sequence
 type: const at::Tensor &
 - annotation: null
 dynamic_type: at::Tensor
 is_nullable: false
 name: self
 type: const at::Tensor &
 - annotation: null
 default: false
 dynamic_type: bool
 is_nullable: false
 kwarg_only: true
 name: out_int32
 type: bool
 - annotation: null
 default: false
 dynamic_type: bool
@@ -112223,19 +119573,19 @@ name: right
 type: bool
 - annotation: null
- default: c10::nullopt
+ default: ::std::nullopt
 dynamic_type: c10::string_view
 is_nullable: true
 kwarg_only: true
 name: side
- type: c10::optional<c10::string_view>
+ type: ::std::optional<c10::string_view>
 - annotation: null
 default: '{}'
 dynamic_type: at::Tensor
 is_nullable: true
 kwarg_only: true
 name: sorter
- type: const c10::optional<at::Tensor> &
+ type: const ::std::optional<at::Tensor> &
 method_of:
 - Type
 - namespace
@@ -112291,20 +119641,20 @@ name: right
 type: bool
 - annotation: null
- default: c10::nullopt
+ default: ::std::nullopt
 dynamic_type: c10::string_view
 is_nullable: true
 kwarg_only: true
 name: side
- type: c10::optional<c10::string_view>
+ type: ::std::optional<c10::string_view>
 - annotation: null
 default: '{}'
 dynamic_type: at::Tensor
 is_nullable: true
 kwarg_only: true
 name: sorter
- type: const c10::optional<at::Tensor> &
- schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, c10::optional<c10::string_view>, const c10::optional<at::Tensor> &, at::Tensor &)
+ type: const ::std::optional<at::Tensor> &
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, ::std::optional<c10::string_view>, const ::std::optional<at::Tensor> &, at::Tensor &)
 schema_order_arguments:
 - annotation: null
 dynamic_type: at::Tensor
 is_nullable: false
 name: sorted_sequence
 type: const at::Tensor &
 - annotation: null
 dynamic_type: at::Tensor
 is_nullable: false
 name: self
 type: const at::Tensor &
 - annotation: null
 default: false
 dynamic_type: bool
 is_nullable: false
 kwarg_only: true
 name: out_int32
 type: bool
 - annotation: null
 default: false
 dynamic_type: bool
@@ -112331,19 +119681,19 @@ name: right
 type: bool
 - annotation: null
- default: c10::nullopt
+ default: ::std::nullopt
 dynamic_type: c10::string_view
 is_nullable: true
 kwarg_only: true
 name: side
- type: c10::optional<c10::string_view>
+ type: ::std::optional<c10::string_view>
 - annotation: null
 default: '{}'
 dynamic_type: at::Tensor
 is_nullable: true
 kwarg_only: true
 name: sorter
- type: const c10::optional<at::Tensor> &
+ type: const ::std::optional<at::Tensor> &
 - allocate: true
 annotation: a!
dynamic_type: at::Tensor
@@ -112399,20 +119749,20 @@ name: right
 type: bool
 - annotation: null
- default: c10::nullopt
+ default: ::std::nullopt
 dynamic_type: c10::string_view
 is_nullable: true
 kwarg_only: true
 name: side
- type: c10::optional<c10::string_view>
+ type: ::std::optional<c10::string_view>
 - annotation: null
 default: '{}'
 dynamic_type: at::Tensor
 is_nullable: true
 kwarg_only: true
 name: sorter
- type: const c10::optional<at::Tensor> &
- schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, bool, bool, c10::optional<c10::string_view>, const c10::optional<at::Tensor> &)
+ type: const ::std::optional<at::Tensor> &
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, bool, bool, ::std::optional<c10::string_view>, const ::std::optional<at::Tensor> &)
 schema_order_arguments:
 - annotation: null
 dynamic_type: at::Tensor
 is_nullable: false
 name: sorted_sequence
 type: const at::Tensor &
 - annotation: null
 dynamic_type: const at::Scalar &
 is_nullable: false
 name: self
 type: const at::Scalar &
 - annotation: null
 default: false
 dynamic_type: bool
 is_nullable: false
 kwarg_only: true
 name: out_int32
 type: bool
 - annotation: null
 default: false
 dynamic_type: bool
@@ -112439,19 +119789,19 @@ name: right
 type: bool
 - annotation: null
- default: c10::nullopt
+ default: ::std::nullopt
 dynamic_type: c10::string_view
 is_nullable: true
 kwarg_only: true
 name: side
- type: c10::optional<c10::string_view>
+ type: ::std::optional<c10::string_view>
 - annotation: null
 default: '{}'
 dynamic_type: at::Tensor
 is_nullable: true
 kwarg_only: true
 name: sorter
- type: const c10::optional<at::Tensor> &
+ type: const ::std::optional<at::Tensor> &
 method_of:
 - Type
 - namespace
@@ -112468,6 +119818,121 @@ with_gil: false
 deprecated: false
 has_math_kernel: false
+- name: searchsorted_out
+ operator_name: searchsorted
+ overload_name: Scalar_out
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: sorted_sequence
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: self
+ type: const at::Scalar &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: out_int32
+ type: bool
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: right
+ type: bool
+ - annotation: null
+ default: ::std::nullopt
+ dynamic_type: c10::string_view
+ is_nullable: true
+ kwarg_only: true
+ name: side
+ type: ::std::optional<c10::string_view>
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: sorter
+ type: const ::std::optional<at::Tensor> &
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, bool, bool, ::std::optional<c10::string_view>, const ::std::optional<at::Tensor> &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: sorted_sequence
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: self
+ type: const at::Scalar &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: out_int32
+ type: bool
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: right
+ type: bool
+ - annotation: null
+ default: ::std::nullopt
+ dynamic_type: c10::string_view
+ is_nullable: true
+ kwarg_only: true
+ name: side
+ type: ::std::optional<c10::string_view>
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: sorter
+ type: const ::std::optional<at::Tensor> &
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ name: out
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
 - name: _convert_indices_from_coo_to_csr
 operator_name: _convert_indices_from_coo_to_csr
 overload_name: ''
@@ -113130,14 +120595,14 @@ dynamic_type: at::Tensor
 is_nullable: true
 name: weight
- type: const c10::optional<at::Tensor> &
+ type: const ::std::optional<at::Tensor> &
 - annotation: null
 default: at::Reduction::Mean
 dynamic_type: int64_t
 is_nullable: false
 name: reduction
 type: int64_t
- schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const c10::optional<at::Tensor> &, int64_t, at::Tensor &)
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const ::std::optional<at::Tensor> &, int64_t, at::Tensor &)
 schema_order_arguments:
 - annotation: null
 dynamic_type: at::Tensor
@@ -113166,7 +120631,7 @@ dynamic_type: at::Tensor
 is_nullable: true
 name: weight
- type: const c10::optional<at::Tensor> &
+ type: const ::std::optional<at::Tensor> &
 - annotation: null
 default: at::Reduction::Mean
 dynamic_type: int64_t
@@ -113230,14 +120695,14 @@ dynamic_type: at::Tensor
 is_nullable: true
 name: weight
- type: const c10::optional<at::Tensor> &
+ type: const ::std::optional<at::Tensor> &
 - annotation: null
 default: at::Reduction::Mean
 dynamic_type: int64_t
 is_nullable: false
 name: reduction
 type: int64_t
- schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const c10::optional<at::Tensor> &, int64_t)
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const ::std::optional<at::Tensor> &, int64_t)
 schema_order_arguments:
 - annotation: null
 dynamic_type: at::Tensor
@@ -113266,7 +120731,7 @@ dynamic_type: at::Tensor
 is_nullable: true
 name: weight
- type: const c10::optional<at::Tensor> &
+ type: const ::std::optional<at::Tensor> &
 - annotation: null
 default: at::Reduction::Mean
 dynamic_type: int64_t
@@ -113333,14 +120798,14 @@ dynamic_type: at::Tensor
 is_nullable: true
 name: weight
- type: const c10::optional<at::Tensor> &
+ type: const ::std::optional<at::Tensor> &
 - annotation: null
 default: at::Reduction::Mean
 dynamic_type: int64_t
 is_nullable: false
 name: reduction
 type: int64_t
- schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const c10::optional<at::Tensor> &, int64_t, at::Tensor &)
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const ::std::optional<at::Tensor> &, int64_t, at::Tensor &)
 schema_order_arguments:
 - annotation: null
 dynamic_type: at::Tensor
@@ -113372,7 +120837,7 @@ dynamic_type: at::Tensor
 is_nullable: true
 name: weight
- type: const c10::optional<at::Tensor> &
+ type: const ::std::optional<at::Tensor> &
 - annotation: null
 default: at::Reduction::Mean
 dynamic_type: int64_t
@@ -113439,14 +120904,14 @@ dynamic_type: at::Tensor
 is_nullable: true
 name: weight
- type: const c10::optional<at::Tensor> &
+ type: const ::std::optional<at::Tensor> &
 - annotation: null
 default: at::Reduction::Mean
 dynamic_type: int64_t
 is_nullable: false
 name: reduction
 type: int64_t
- schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &,
const at::Tensor &, const at::Scalar &, const at::Scalar &, const c10::optional &, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const ::std::optional &, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -113478,7 +120943,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -113968,7 +121433,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -113981,7 +121446,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -113998,7 +121463,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114056,7 +121521,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114069,7 +121534,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114086,7 +121551,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114137,7 +121602,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114150,7 +121615,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114167,7 +121632,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114231,7 +121696,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114242,7 +121707,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const 
at::Tensor &, const ::std::optional &, int64_t, int64_t, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114258,7 +121723,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114323,7 +121788,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114334,7 +121799,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114350,7 +121815,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114415,7 +121880,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114431,7 +121896,7 @@ is_nullable: false name: total_weight type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, const at::Tensor &, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, const at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114452,7 +121917,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114517,7 +121982,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114533,7 +121998,7 @@ is_nullable: false name: total_weight type: const at::Tensor & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, const at::Tensor &) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114554,7 +122019,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114615,7 +122080,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114628,7 +122093,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, at::Tensor &) 
schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114645,7 +122110,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114703,7 +122168,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114716,7 +122181,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114733,7 +122198,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -114797,7 +122262,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114808,7 +122273,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114824,7 +122289,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114889,7 +122354,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114900,7 +122365,7 @@ is_nullable: false name: ignore_index type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -114916,7 +122381,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114981,7 +122446,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -114997,7 +122462,7 @@ is_nullable: false name: total_weight type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, const at::Tensor &, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, const at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -115018,7 +122483,7 @@ dynamic_type: at::Tensor 
is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -115083,7 +122548,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -115099,7 +122564,7 @@ is_nullable: false name: total_weight type: const at::Tensor & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, const at::Tensor &) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -115120,7 +122585,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -118227,12 +125692,12 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -118263,11 +125728,11 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -118327,12 +125792,12 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -118363,11 +125828,11 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -118515,12 +125980,12 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, ::std::optional) schema_order_arguments: - annotation: a! 
dynamic_type: at::Tensor @@ -118551,11 +126016,11 @@ name: training type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -120248,12 +127713,12 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -120293,11 +127758,11 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -120366,12 +127831,12 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -120411,11 +127876,11 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -120488,8 +127953,8 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -120533,7 +127998,7 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -120606,8 +128071,8 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -120651,7 +128116,7 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -120720,12 +128185,12 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -120765,11 +128230,11 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -120838,12 +128303,12 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -120883,11 +128348,11 @@ name: count_include_pad type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -120960,8 +128425,8 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -121005,7 +128470,7 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -121078,8 +128543,8 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -121123,7 +128588,7 @@ dynamic_type: int64_t is_nullable: true name: divisor_override - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -122785,7 +130250,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -122856,7 +130321,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor + schema_string: aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -122913,7 +130378,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -123008,7 +130473,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor + schema_string: aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -124568,12 +132033,12 @@ name: mode type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: value - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -124591,11 +132056,11 @@ name: mode type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: value - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -124636,12 +132101,12 @@ name: mode type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: value - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::string_view, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::string_view, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -124660,11 +132125,11 @@ name: mode type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: value - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -124707,8 +132172,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -124729,7 +132194,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -124772,8 +132237,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -124794,7 +132259,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -124837,8 +132302,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor 
@@ -124859,7 +132324,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -124902,8 +132367,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -124924,7 +132389,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -124967,8 +132432,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -124989,7 +132454,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125032,8 +132497,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125054,7 +132519,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125092,8 +132557,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125109,7 +132574,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125147,8 +132612,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125164,7 +132629,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125202,8 +132667,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125219,7 
+132684,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125257,8 +132722,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125274,7 +132739,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125312,8 +132777,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125329,7 +132794,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125367,8 +132832,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125384,7 +132849,7 @@ dynamic_type: at::ArrayRef is_nullable: true name: scale_factors - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -125432,12 +132897,12 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125456,11 +132921,11 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -125508,12 +132973,12 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125532,11 +132997,11 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -125590,12 +133055,12 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125620,11 +133085,11 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -125678,12 +133143,12 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125708,11 +133173,11 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -125760,18 +133225,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125790,17 +133255,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true 
annotation: a! dynamic_type: at::Tensor @@ -125848,18 +133313,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125878,17 +133343,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -125942,18 +133407,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -125978,17 +133443,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -126042,18 +133507,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126078,17 +133543,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -126136,18 +133601,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126166,17 +133631,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -126224,18 +133689,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126254,17 +133719,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -126318,18 +133783,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126354,17 +133819,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -126418,18 +133883,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126454,17 +133919,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -126512,18 +133977,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126542,17 +134007,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -126600,18 +134065,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126630,17 +134095,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -126694,18 +134159,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126730,17 +134195,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -126794,18 +134259,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126830,17 +134295,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -126888,18 +134353,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -126918,17 +134383,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -126976,18 +134441,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127006,17 +134471,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -127070,18 +134535,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127106,17 +134571,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -127170,18 +134635,18 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127206,17 +134671,17 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -127264,24 +134729,24 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127300,23 +134765,23 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -127364,24 +134829,24 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127400,23 +134865,23 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -127470,24 +134935,24 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127512,23 +134977,23 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -127582,24 +135047,24 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127624,23 +135089,23 @@ name: align_corners type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -127683,12 +135148,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127702,11 +135167,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -127756,12 +135221,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127775,11 +135240,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -127822,12 +135287,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127841,11 +135306,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -127881,12 +135346,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127900,11 +135365,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -127953,12 +135418,12 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -127978,11 +135443,11 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -128038,12 +135503,12 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128063,11 +135528,11 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -128116,12 +135581,12 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128141,11 +135606,11 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -128187,12 +135652,12 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128212,11 +135677,11 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -128259,18 +135724,18 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128284,17 +135749,17 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -128344,18 +135809,18 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128369,17 +135834,17 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -128422,18 +135887,18 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128447,17 +135912,17 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -128493,18 +135958,18 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128518,17 +135983,17 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -128577,18 +136042,18 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt 
dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128608,17 +136073,17 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -128674,18 +136139,18 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128705,17 +136170,17 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -128764,18 +136229,18 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128795,17 +136260,17 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -128847,18 +136312,18 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128878,17 +136343,17 @@ size: 4 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -128931,24 +136396,24 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -128962,23 +136427,23 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double 
is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -129028,24 +136493,24 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129059,23 +136524,23 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -129118,24 +136583,24 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129149,23 +136614,23 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -129201,24 +136666,24 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129232,23 +136697,23 @@ size: 3 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -129297,24 +136762,24 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - 
schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129334,23 +136799,23 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -129406,24 +136871,24 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129443,23 +136908,23 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -129508,24 +136973,24 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129545,23 +137010,23 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -129603,24 +137068,24 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129640,23 +137105,23 @@ size: 5 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_d - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_h - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: scales_w - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -129802,12 +137267,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: 
null dynamic_type: at::Tensor @@ -129820,11 +137285,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -129866,12 +137331,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -129884,11 +137349,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: eps - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -130014,7 +137479,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -130044,7 +137509,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -130073,7 +137538,7 @@ name: dilation size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -130096,7 +137561,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 1 dynamic_type: at::IntArrayRef @@ -130153,7 +137618,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor + schema_string: aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor
[regenerated declarations, reflowed: within each declaration below, every nullable argument type changes from `const c10::optional &` to `const ::std::optional &`, and the matching `schema_order_cpp_signature` lines are updated in the same way; the changed schema strings are:]
-  schema_string: aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
+  schema_string: aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
-  schema_string: aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor
+  schema_string: aten::slow_conv_transpose3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1) -> Tensor
-  schema_string: aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+  schema_string: aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
-  schema_string: aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor
+  schema_string: aten::thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor
-  schema_string: aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)
+  schema_string: aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) output) -> Tensor(a!)
-  schema_string: aten::_slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor
+  schema_string: aten::_slow_conv2d_forward(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding) -> Tensor
-  schema_string: aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+  schema_string: aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
-  schema_string: aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+  schema_string: aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
-  schema_string: aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
+  schema_string: aten::_conv_depthwise2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
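A side note on the `int[] -> SymInt[]` schema moves above: the `schema_order_cpp_signature` lines in the same hunks still take `at::IntArrayRef`, so integer-based C++ call sites are unaffected; SymInt only becomes visible under symbolic shape tracing. A minimal sketch (assuming a LibTorch build matching these regenerated declarations; shapes are illustrative):

```cpp
// Sketch: the SymInt[2] schema change does not alter the plain C++ surface;
// kernel_size/stride/padding still bind as at::IntArrayRef.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor input  = at::randn({1, 3, 8, 8});   // NCHW
  at::Tensor weight = at::randn({4, 3, 3, 3});   // out_ch, in_ch, kH, kW
  // Brace-lists of int64_t still convert to at::IntArrayRef, exactly as
  // before the int[2] -> SymInt[2] schema move.
  at::Tensor out = at::thnn_conv2d(input, weight, /*kernel_size=*/{3, 3});
  std::cout << out.sizes() << "\n";  // [1, 4, 6, 6]
  return 0;
}
```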
[the same `c10::optional` -> `::std::optional` substitutions continue through the depthwise and slow_conv3d declarations; the changed schema strings are:]
-  schema_string: aten::_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation) -> Tensor
+  schema_string: aten::_conv_depthwise2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation) -> Tensor
-  schema_string: aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor
+  schema_string: aten::conv_depthwise3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation) -> Tensor
-  schema_string: aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+  schema_string: aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
-  schema_string: aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor
+  schema_string: aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor
-  schema_string: aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
+  schema_string: aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
-  schema_string: aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor
+  schema_string: aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding) -> Tensor
-  schema_string: aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor
+  schema_string: aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor
-  schema_string: aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor
+  schema_string: aten::slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor
[two further hunks (at -135388 and -135442) update a declaration with a nullable double `eps` argument and its out= overload: `default: c10::nullopt` becomes `default: ::std::nullopt`, `type: c10::optional` becomes `type: ::std::optional`, and the `schema_order_cpp_signature` lines change accordingly]
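The other change repeated through these hunks is the nullable `Tensor? bias` parameter, whose C++ type moves from `const c10::optional<at::Tensor> &` to `const ::std::optional<at::Tensor> &`. In the LibTorch versions these declarations are generated from, `c10::optional` has become an alias of `std::optional`, so either spelling works at call sites; a sketch under that assumption:

```cpp
// Sketch: passing the nullable `Tensor? bias` after the move to
// ::std::optional. Assumes a LibTorch where c10::optional aliases
// std::optional (as in the headers this diff regenerates against).
#include <ATen/ATen.h>
#include <iostream>
#include <optional>

int main() {
  at::Tensor input  = at::randn({1, 2, 8, 8, 8});  // NCDHW
  at::Tensor weight = at::randn({4, 2, 3, 3, 3});
  ::std::optional<at::Tensor> bias;                // disengaged == "no bias"
  at::Tensor out = at::slow_conv3d(input, weight, /*kernel_size=*/{3, 3, 3}, bias);
  std::cout << out.sizes() << "\n";                // [1, 4, 6, 6, 6]
  return 0;
}
```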
dynamic_type: at::Tensor @@ -136101,13 +143566,13 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136120,12 +143585,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -136472,12 +143937,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136490,11 +143955,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -136516,7 +143981,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + schema_string: aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -136524,11 +143989,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136536,12 +144001,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136549,11 +144014,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136561,11 +144026,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -136587,7 +144052,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
arguments: - allocate: true annotation: a! @@ -136602,11 +144067,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136614,12 +144079,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136627,11 +144092,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136639,11 +144104,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -136672,7 +144137,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + schema_string: aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -136680,11 +144145,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136692,12 +144157,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136705,11 +144170,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136717,11 +144182,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -136743,7 +144208,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -136758,11 +144223,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136770,12 +144235,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136783,11 +144248,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136795,11 +144260,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -136828,7 +144293,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + schema_string: aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -136836,11 +144301,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136848,12 +144313,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136861,11 +144326,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136873,11 +144338,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -136899,7 +144364,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -136914,11 +144379,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136926,12 +144391,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -136939,11 +144404,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -136951,11 +144416,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -136984,7 +144449,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + schema_string: aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -136992,11 +144457,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137004,12 +144469,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137017,11 +144482,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137029,11 +144494,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -137055,7 +144520,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -137070,11 +144535,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137082,12 +144547,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137095,11 +144560,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137107,11 +144572,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -137140,7 +144605,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + schema_string: aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -137148,11 +144613,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137160,12 +144625,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137173,11 +144638,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137185,11 +144650,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -137211,7 +144676,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -137226,11 +144691,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137238,12 +144703,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137251,11 +144716,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137263,11 +144728,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -137296,7 +144761,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + schema_string: aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -137304,11 +144769,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137316,12 +144781,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137329,11 +144794,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137341,11 +144806,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -137367,7 +144832,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -137382,11 +144847,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137394,12 +144859,12 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137407,11 +144872,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: n - type: c10::optional + type: ::std::optional - annotation: null default: -1 dynamic_type: int64_t @@ -137419,11 +144884,11 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -137452,7 +144917,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + schema_string: aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -137460,7 +144925,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137474,12 +144939,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137487,7 +144952,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137501,11 +144966,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -137527,7 +144992,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -137542,7 +145007,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137556,12 +145021,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137569,7 +145034,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137583,11 +145048,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -137616,7 +145081,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + schema_string: aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -137624,7 +145089,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137638,12 +145103,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137651,7 +145116,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137665,11 +145130,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -137691,7 +145156,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -137706,7 +145171,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137720,12 +145185,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137733,7 +145198,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137747,11 +145212,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -137780,7 +145245,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + schema_string: aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -137788,7 +145253,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137802,12 +145267,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137815,7 +145280,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137829,11 +145294,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -137855,7 +145320,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -137870,7 +145335,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137884,12 +145349,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137897,7 +145362,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137911,11 +145376,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -137944,7 +145409,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + schema_string: aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -137952,7 +145417,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137966,12 +145431,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -137979,7 +145444,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -137993,11 +145458,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -138019,7 +145484,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -138034,7 +145499,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138048,12 +145513,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138061,7 +145526,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138075,11 +145540,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -138108,7 +145573,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + schema_string: aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -138116,7 +145581,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138130,12 +145595,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138143,7 +145608,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138157,11 +145622,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -138183,7 +145648,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -138198,7 +145663,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138212,12 +145677,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional, const at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138225,7 +145690,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138239,11 +145704,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -138272,7 +145737,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + schema_string: aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -138280,7 +145745,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138294,12 +145759,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138307,7 +145772,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138321,11 +145786,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -138347,7 +145812,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -138362,7 +145827,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138376,12 +145841,12 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional, const at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, ::std::optional, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138389,7 +145854,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s @@ -138403,11 +145868,11 @@ size: 1 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -138436,7 +145901,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + schema_string: aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -138444,26 +145909,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138471,25 +145936,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -138511,7 +145976,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
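The `.out` overloads recorded here follow the usual ATen convention: the `Tensor(a!)` argument is written in place and returned by reference, and the generated C++ entry point takes `out` first. A sketch against the fft_fftn.out record whose diff starts above and continues below (the helper name `fftn_into` is hypothetical):

#include <torch/torch.h>

// Hypothetical helper: fills `out` with the n-dimensional FFT of `x`.
// at::fft_fftn_out takes the mutable out tensor as its first argument.
void fftn_into(torch::Tensor& out, const torch::Tensor& x) {
  at::fft_fftn_out(out, x, /*s=*/std::nullopt, /*dim=*/std::nullopt,
                   /*norm=*/std::nullopt);
}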
@@ -138526,26 +145991,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138553,25 +146018,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -138600,7 +146065,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + schema_string: aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -138608,26 +146073,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138635,25 +146100,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -138675,7 +146140,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -138690,26 +146155,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138717,25 +146182,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -138764,7 +146229,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + schema_string: aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -138772,26 +146237,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138799,25 +146264,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -138839,7 +146304,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
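One consequence of the optional `s` argument in the rfftn/irfftn pair above: the inverse transform needs the original sizes to reconstruct an odd-length last dimension, which is exactly what `s` carries. A sketch (the helper name `rfftn_round_trip` is hypothetical):

#include <torch/torch.h>
#include <vector>

// Hypothetical helper: real FFT and back. Passing the original sizes as `s`
// lets irfftn recover the input shape even when the last dim is odd.
torch::Tensor rfftn_round_trip(const torch::Tensor& x) {
  auto spec = at::fft_rfftn(x, std::nullopt, std::nullopt, std::nullopt);
  const std::vector<int64_t> s = x.sizes().vec();
  return at::fft_irfftn(spec, s, std::nullopt, std::nullopt);
}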
@@ -138854,26 +146319,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138881,25 +146346,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -138928,7 +146393,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + schema_string: aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -138936,26 +146401,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -138963,25 +146428,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -139003,7 +146468,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -139018,26 +146483,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -139045,25 +146510,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -139092,7 +146557,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + schema_string: aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -139100,26 +146565,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -139127,25 +146592,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -139167,7 +146632,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
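All of these FFT records share the optional `norm` string (now declared as `::std::optional` over `c10::string_view`). Left empty it means "backward"; the other accepted values are "ortho" and "forward". A sketch against the fft_hfftn record above (the helper name `hfftn_ortho` is hypothetical):

#include <torch/torch.h>

// Hypothetical helper: orthonormally scaled Hermitian FFT over all dims.
torch::Tensor hfftn_ortho(const torch::Tensor& x) {
  return at::fft_hfftn(x, /*s=*/std::nullopt, /*dim=*/std::nullopt,
                       /*norm=*/"ortho");
}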
@@ -139182,26 +146647,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional, const at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -139209,25 +146674,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -139256,7 +146721,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + schema_string: aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -139264,26 +146729,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -139291,25 +146756,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -139331,7 +146796,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -139346,26 +146811,26 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional, const at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, ::std::optional, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -139373,25 +146838,25 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: s size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true name: norm - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -139440,7 +146905,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -139454,33 +146919,33 @@ name: d type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -139583,7 +147048,7 @@ kwarg_only: true name: options type: at::TensorOptions - schema_order_cpp_signature: at::Tensor (int64_t, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_cpp_signature: at::Tensor (int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -139597,33 +147062,33 @@ name: d type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true kwarg_only: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true kwarg_only: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -139714,7 +147179,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -139728,7 +147193,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -139763,7 +147228,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -139777,7 +147242,7 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -140260,7 +147725,7 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false @@ -140346,7 +147811,7 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false 
deprecated: false @@ -141645,19 +149110,19 @@ name: b type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: rcond - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -141670,18 +149135,18 @@ name: b type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: rcond - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -141761,19 +149226,19 @@ name: b type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: rcond - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -141786,18 +149251,18 @@ name: b type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true name: rcond - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -142663,6 +150128,41 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _linalg_eigvals + operator_name: _linalg_eigvals + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_linalg_eigvals(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: linalg_eigvals operator_name: linalg_eigvals overload_name: '' @@ -142742,11 +150242,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: _linalg_eigh operator_name: _linalg_eigh overload_name: '' @@ -143892,13 +151392,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: ord - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -143911,13 +151411,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, at::OptionalIntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, at::OptionalIntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -143925,13 +151425,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: ord - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -143944,12 +151444,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -143984,7 +151484,7 @@ name: ord type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -143997,13 +151497,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, at::OptionalIntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, 
at::OptionalIntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144016,7 +151516,7 @@ name: ord type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144029,12 +151529,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -144071,13 +151571,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: ord - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144090,13 +151590,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::OptionalIntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144104,13 +151604,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: ord - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144123,12 +151623,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -144177,7 +151677,7 @@ name: ord type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144190,13 +151690,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, at::OptionalIntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144209,7 +151709,7 @@ name: ord type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144222,12 +151722,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -144270,7 +151770,7 @@ name: ord type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144283,13 +151783,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, at::OptionalIntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, at::OptionalIntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144303,7 +151803,7 @@ name: ord type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144316,12 +151816,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -144364,7 +151864,7 @@ name: ord type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144377,13 +151877,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::OptionalIntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144397,7 +151897,7 @@ name: ord type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim @@ -144410,12 +151910,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -144469,13 +151969,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, at::IntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, at::IntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144500,12 +152000,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -144559,13 +152059,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::IntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144590,12 +152090,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -144650,13 +152150,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, at::IntArrayRef, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, at::IntArrayRef, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144682,12 +152182,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -144742,13 +152242,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, at::IntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, at::IntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144774,12 +152274,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -144828,13 +152328,13 @@ name: compute_uv type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144854,12 +152354,12 @@ name: compute_uv type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -144934,13 +152434,13 @@ name: compute_uv type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, c10::optional, at::Tensor &, at::Tensor &, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, ::std::optional, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -144960,12 +152460,12 @@ name: compute_uv type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -145034,13 +152534,13 @@ name: full_matrices type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145054,12 +152554,12 @@ name: full_matrices type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -145128,13 +152628,13 @@ name: full_matrices type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, c10::optional, at::Tensor &, at::Tensor &, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, ::std::optional, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145148,12 +152648,12 @@ name: full_matrices type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -145216,13 +152716,13 @@ name: A type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145230,12 +152730,12 @@ name: A type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -145272,13 +152772,13 @@ name: A type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145286,12 +152786,12 @@ name: A type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: c10::string_view is_nullable: true kwarg_only: true name: driver - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -145328,12 +152828,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145341,11 +152841,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -145382,12 +152882,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145395,11 +152895,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -145545,14 +153045,14 @@ is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -145560,7 +153060,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145573,14 +153073,14 @@ is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -145629,14 +153129,14 @@ is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -145644,7 +153144,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145657,14 +153157,14 @@ is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -145708,19 +153208,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -145728,7 +153228,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145736,19 +153236,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -145792,19 +153292,19 @@ name: 
self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -145812,7 +153312,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, ::std::optional, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -145820,19 +153320,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -146602,6 +154102,65 @@ with_gil: false deprecated: false has_math_kernel: true +- name: _spsolve + operator_name: _spsolve + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_spsolve(Tensor A, Tensor B, *, bool left=True) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: B + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: left + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: B + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: left + type: bool + method_of: + - Type + - namespace + mode: native + python_module: sparse + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: linalg_solve_out operator_name: linalg_solve overload_name: out @@ -146801,7 +154360,7 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dims @@ -146819,7 +154378,7 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dims @@ -146865,7 +154424,7 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dims @@ -146883,7 +154442,7 @@ name: other type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dims @@ -147169,14 +154728,14 @@ 
is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -147184,7 +154743,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const ::std::optional &, const ::std::optional &, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -147197,14 +154756,14 @@ is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -147253,14 +154812,14 @@ is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -147268,7 +154827,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -147281,14 +154840,14 @@ is_nullable: true kwarg_only: true name: atol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: rtol - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -147332,19 +154891,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -147352,7 +154911,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -147360,19 +154919,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -147416,19 +154975,19 @@ name: self type: const at::Tensor & - annotation: null - 
default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -147436,7 +154995,7 @@ kwarg_only: true name: hermitian type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, ::std::optional, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -147444,19 +155003,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: atol - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: double is_nullable: true kwarg_only: true name: rtol - type: c10::optional + type: ::std::optional - annotation: null default: false dynamic_type: bool @@ -147845,7 +155404,7 @@ name: padding type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: output_size @@ -147863,7 +155422,7 @@ name: padding type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: output_size @@ -147941,6 +155500,63 @@ with_gil: false deprecated: false has_math_kernel: true +- name: _test_parallel_materialize + operator_name: _test_parallel_materialize + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_test_parallel_materialize(Tensor self, int num_parallel, bool skip_first=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_parallel + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: skip_first + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_parallel + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: skip_first + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _test_optional_intlist operator_name: _test_optional_intlist overload_name: '' @@ -148049,8 +155665,8 @@ dynamic_type: at::ArrayRef is_nullable: true name: addends - type: c10::optional> - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional>) + type: ::std::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional>) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -148061,7 +155677,7 @@ dynamic_type: at::ArrayRef is_nullable: true 
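
Nullable list arguments get the same treatment: schemas like int[]? dims and float[]? addends now surface as ::std::optional over an at::ArrayRef (element types again elided in this rendering), or as at::OptionalIntArrayRef in the generated C++ API. A sketch of the calling convention; identifying the dims hunk further up with linalg_tensorsolve (schema: Tensor self, Tensor other, int[]? dims=None) is an assumption of this example, though the convention is the same for any nullable list argument:

#include <ATen/ATen.h>
#include <optional>

// Omitting the nullable list entirely.
at::Tensor tensorsolve_all_dims(const at::Tensor& a, const at::Tensor& b) {
  return at::linalg_tensorsolve(a, b, /*dims=*/::std::nullopt);
}

// Supplying it: an at::IntArrayRef converts implicitly into the optional.
at::Tensor tensorsolve_swapped_dims(const at::Tensor& a, const at::Tensor& b) {
  const int64_t dims[] = {1, 0};
  return at::linalg_tensorsolve(a, b, at::IntArrayRef(dims));
}
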
name: addends - type: c10::optional> + type: ::std::optional> method_of: - Type - namespace @@ -148463,21 +156079,21 @@ is_nullable: true kwarg_only: true name: lengths - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: indices - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: offsets - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ -148493,13 +156109,13 @@ name: unsafe type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: initial - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, bool, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, bool, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -148517,21 +156133,21 @@ is_nullable: true kwarg_only: true name: lengths - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: indices - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: offsets - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ -148547,12 +156163,12 @@ name: unsafe type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: initial - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -148602,14 +156218,14 @@ is_nullable: true kwarg_only: true name: lengths - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: offsets - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ -148618,13 +156234,13 @@ name: axis type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: initial - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::string_view, const c10::optional &, const c10::optional &, int64_t, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::string_view, const ::std::optional &, const ::std::optional &, int64_t, const ::std::optional &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -148652,14 +156268,14 @@ is_nullable: true kwarg_only: true name: lengths - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: offsets - type: const c10::optional & + type: const 
::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ -148668,12 +156284,12 @@ name: axis type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: initial - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -148695,7 +156311,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor + schema_string: aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0, str padding_side="right") -> Tensor arguments: - annotation: null dynamic_type: at::TensorList @@ -148714,7 +156330,13 @@ is_nullable: false name: padding_value type: double - schema_order_cpp_signature: at::Tensor (at::TensorList, bool, double) + - annotation: null + default: '"right"' + dynamic_type: c10::string_view + is_nullable: false + name: padding_side + type: c10::string_view + schema_order_cpp_signature: at::Tensor (at::TensorList, bool, double, c10::string_view) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -148733,6 +156355,12 @@ is_nullable: false name: padding_value type: double + - annotation: null + default: '"right"' + dynamic_type: c10::string_view + is_nullable: false + name: padding_side + type: c10::string_view method_of: - Type - namespace @@ -148842,30 +156470,30 @@ name: list type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: pin_memory - type: c10::optional - schema_order_cpp_signature: at::Tensor (at::TensorList, c10::optional, c10::optional, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (at::TensorList, ::std::optional, ::std::optional, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -148873,29 +156501,29 @@ name: list type: at::TensorList - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: pin_memory - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -149175,12 +156803,12 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional - 
schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -149198,11 +156826,11 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -149603,24 +157231,24 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: start - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: end - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t is_nullable: false name: step type: int64_t - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, int64_t) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional, int64_t) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -149634,17 +157262,17 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: start - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: end - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -150770,7 +158398,7 @@ name: padding type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: output_size @@ -150788,7 +158416,7 @@ name: padding type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: output_size @@ -150809,6 +158437,130 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _jagged_to_padded_dense_forward + operator_name: _jagged_to_padded_dense_forward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_jagged_to_padded_dense_forward(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value=0.0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: offsets + type: at::TensorList + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: max_lengths + type: at::IntArrayRef + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: padding_value + type: double + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::TensorList, at::IntArrayRef, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: offsets + type: at::TensorList + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: max_lengths + type: at::IntArrayRef + - annotation: null + default: 0.0 + 
dynamic_type: double + is_nullable: false + name: padding_value + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _padded_dense_to_jagged_forward + operator_name: _padded_dense_to_jagged_forward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_padded_dense_to_jagged_forward(Tensor dense, Tensor[] offsets, SymInt? total_L=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: offsets + type: at::TensorList + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: total_L + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::TensorList, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: offsets + type: at::TensorList + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + name: total_L + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _nested_tensor_softmax_with_shape operator_name: _nested_tensor_softmax_with_shape overload_name: '' @@ -150854,6 +158606,63 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _safe_softmax + operator_name: _safe_softmax + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_safe_softmax(Tensor self, int dim, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false - name: _transformer_encoder_layer_fwd operator_name: _transformer_encoder_layer_fwd overload_name: '' @@ -150956,14 +158765,14 @@ dynamic_type: at::Tensor is_nullable: true name: mask - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, double, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, double, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151060,13 +158869,13 @@ dynamic_type: at::Tensor is_nullable: true name: mask - type: const c10::optional & + type: const ::std::optional & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -151140,7 +158949,7 @@ dynamic_type: at::Tensor is_nullable: true name: mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: true dynamic_type: bool @@ -151154,12 +158963,12 @@ name: average_attn_weights type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, bool, bool, c10::optional) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, 
const at::Tensor &, const at::Tensor &, const ::std::optional &, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151211,7 +159020,7 @@ dynamic_type: at::Tensor is_nullable: true name: mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: true dynamic_type: bool @@ -151225,11 +159034,11 @@ name: average_attn_weights type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -151254,7 +159063,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> Tensor + schema_string: aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -151276,7 +159085,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151289,7 +159098,21 @@ is_nullable: false name: is_causal type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, double, bool) + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: enable_gqa + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, double, bool, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151311,7 +159134,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151324,6 +159147,20 @@ is_nullable: false name: is_causal type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: enable_gqa + type: bool method_of: - Type - namespace @@ -151340,12 +159177,12 @@ with_gil: false deprecated: false has_math_kernel: true -- name: _scaled_dot_product_attention - operator_name: _scaled_dot_product_attention +- name: _fused_sdp_choice + operator_name: _fused_sdp_choice overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor) + schema_string: aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? 
scale=None, bool enable_gqa=False) -> int arguments: - annotation: null dynamic_type: at::Tensor @@ -151367,7 +159204,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151378,15 +159215,23 @@ default: false dynamic_type: bool is_nullable: false - name: need_attn_weights + name: is_causal type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false - name: is_causal + kwarg_only: true + name: enable_gqa type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, double, bool, bool) + schema_order_cpp_signature: int64_t (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, double, bool, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151408,7 +159253,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151419,39 +159264,44 @@ default: false dynamic_type: bool is_nullable: false - name: need_attn_weights + name: is_causal type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional - annotation: null default: false dynamic_type: bool is_nullable: false - name: is_causal + kwarg_only: true + name: enable_gqa type: bool method_of: - Type - namespace mode: native - python_module: nn + python_module: '' returns: - - dynamic_type: at::Tensor - name: result0 - type: at::Tensor - - dynamic_type: at::Tensor - name: result1 - type: at::Tensor + - dynamic_type: int64_t + name: result + type: int64_t inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true -- name: _fused_sdp_choice - operator_name: _fused_sdp_choice + has_math_kernel: false +- name: _scaled_dot_product_attention_math + operator_name: _scaled_dot_product_attention_math overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> int + schema_string: aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? 
scale=None, bool enable_gqa=False) -> (Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -151473,7 +159323,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151486,7 +159336,27 @@ is_nullable: false name: is_causal type: bool - schema_order_cpp_signature: int64_t (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, double, bool) + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: dropout_mask + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: enable_gqa + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, double, bool, const ::std::optional &, ::std::optional, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151508,7 +159378,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151521,28 +159391,51 @@ is_nullable: false name: is_causal type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: dropout_mask + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: enable_gqa + type: bool method_of: - Type - namespace mode: native python_module: '' returns: - - dynamic_type: int64_t - name: result - type: int64_t + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor inplace: false is_factory_method: false - abstract: true + abstract: false device_guard: true with_gil: false deprecated: false - has_math_kernel: false -- name: _scaled_dot_product_attention_math - operator_name: _scaled_dot_product_attention_math + has_math_kernel: true +- name: _scaled_dot_product_attention_math_for_mps + operator_name: _scaled_dot_product_attention_math_for_mps overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None) -> (Tensor, Tensor) + schema_string: aten::_scaled_dot_product_attention_math_for_mps(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? 
scale=None) -> (Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -151564,7 +159457,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151582,8 +159475,15 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_mask - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, double, bool, const c10::optional &) + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, double, bool, const ::std::optional &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151605,7 +159505,7 @@ dynamic_type: at::Tensor is_nullable: true name: attn_mask - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0.0 dynamic_type: double @@ -151623,7 +159523,14 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_mask - type: const c10::optional & + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -151638,17 +159545,17 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: _scaled_dot_product_flash_attention operator_name: _scaled_dot_product_flash_attention overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False) -> (Tensor ouput, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, int philox_seed, int philox_offset, Tensor debug_attn_mask) + schema_string: aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? 
scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask) arguments: - annotation: null dynamic_type: at::Tensor @@ -151683,7 +159590,14 @@ is_nullable: false name: return_debug_mask type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, bool, bool) + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151718,6 +159632,13 @@ is_nullable: false name: return_debug_mask type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -151725,8 +159646,8 @@ python_module: '' returns: - dynamic_type: at::Tensor - field_name: ouput - name: ouput + field_name: output + name: output type: at::Tensor - dynamic_type: at::Tensor field_name: logsumexp @@ -151743,19 +159664,19 @@ - dynamic_type: int64_t field_name: max_q name: max_q - type: int64_t + type: c10::SymInt - dynamic_type: int64_t field_name: max_k name: max_k - type: int64_t - - dynamic_type: int64_t + type: c10::SymInt + - dynamic_type: at::Tensor field_name: philox_seed name: philox_seed - type: int64_t - - dynamic_type: int64_t + type: at::Tensor + - dynamic_type: at::Tensor field_name: philox_offset name: philox_offset - type: int64_t + type: at::Tensor - dynamic_type: at::Tensor field_name: debug_attn_mask name: debug_attn_mask @@ -151767,18 +159688,13 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _scaled_dot_product_flash_attention_backward - operator_name: _scaled_dot_product_flash_attention_backward +- name: _scaled_dot_product_flash_attention_for_cpu + operator_name: _scaled_dot_product_flash_attention_for_cpu overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value) + schema_string: aten::_scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, *, Tensor? attn_mask=None, float? 
scale=None) -> (Tensor output, Tensor logsumexp) arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: grad_out - type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -151795,62 +159711,33 @@ name: value type: const at::Tensor & - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: out - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: logsumexp - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: cum_seq_q - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: cum_seq_k - type: const at::Tensor & - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_q - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_k - type: int64_t - - annotation: null + default: 0.0 dynamic_type: double is_nullable: false name: dropout_p type: double - annotation: null + default: false dynamic_type: bool is_nullable: false name: is_causal type: bool - annotation: null - dynamic_type: int64_t - is_nullable: false - name: philox_seed - type: int64_t + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: attn_mask + type: const ::std::optional & - annotation: null - dynamic_type: int64_t - is_nullable: false - name: philox_offset - type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double, bool, int64_t, int64_t) + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, bool, const ::std::optional &, ::std::optional) schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: grad_out - type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -151867,55 +159754,31 @@ name: value type: const at::Tensor & - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: out - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: logsumexp - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: cum_seq_q - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: cum_seq_k - type: const at::Tensor & - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_q - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_k - type: int64_t - - annotation: null + default: 0.0 dynamic_type: double is_nullable: false name: dropout_p type: double - annotation: null + default: false dynamic_type: bool is_nullable: false name: is_causal type: bool - annotation: null - dynamic_type: int64_t - is_nullable: false - name: philox_seed - type: int64_t + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: attn_mask + type: const ::std::optional & - annotation: null - dynamic_type: int64_t - is_nullable: false - name: philox_offset - type: int64_t + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true 
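
The scaled_dot_product_attention family gains two kwarg-only parameters throughout these hunks: float? scale=None, which overrides the default 1/sqrt(head_dim) softmax scaling, and bool enable_gqa=False, which permits grouped-query layouts where key/value carry fewer heads than query. A sketch against the public entry point, following the schema_order_cpp_signature recorded above:

#include <ATen/ATen.h>
#include <optional>

at::Tensor sdpa_with_explicit_scale() {
  // Layout: (batch, num_heads, seq_len, head_dim).
  auto q = at::randn({2, 8, 16, 64});
  auto k = at::randn({2, 8, 16, 64});
  auto v = at::randn({2, 8, 16, 64});
  return at::scaled_dot_product_attention(
      q, k, v,
      /*attn_mask=*/::std::nullopt,
      /*dropout_p=*/0.0,
      /*is_causal=*/true,
      /*scale=*/0.125,         // 1/sqrt(64): the value the default would pick here
      /*enable_gqa=*/false);
}

The explicit scale is what the private backends in the entries below (_fused_sdp_choice, the *_math variants, and the flash/efficient kernels) thread through; with equal query and key/value head counts, enable_gqa should not change the result.
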
+ name: scale + type: ::std::optional method_of: - Type - namespace @@ -151923,16 +159786,12 @@ python_module: '' returns: - dynamic_type: at::Tensor - field_name: grad_query - name: grad_query - type: at::Tensor - - dynamic_type: at::Tensor - field_name: grad_key - name: grad_key + field_name: output + name: output type: at::Tensor - dynamic_type: at::Tensor - field_name: grad_value - name: grad_value + field_name: logsumexp + name: logsumexp type: at::Tensor inplace: false is_factory_method: false @@ -151941,12 +159800,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _scaled_dot_product_efficient_attention - operator_name: _scaled_dot_product_efficient_attention +- name: _scaled_dot_product_fused_attention_overrideable + operator_name: _scaled_dot_product_fused_attention_overrideable overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, bool compute_log_sumexp, bool is_causal=False) -> (Tensor, Tensor) + schema_string: aten::_scaled_dot_product_fused_attention_overrideable(Tensor query, Tensor key, Tensor value, Tensor? attn_bias=None, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask) arguments: - annotation: null dynamic_type: at::Tensor @@ -151964,17 +159823,37 @@ name: value type: const at::Tensor & - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: attn_bias + type: const ::std::optional & + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + default: false dynamic_type: bool is_nullable: false - name: compute_log_sumexp + name: is_causal type: bool - annotation: null default: false dynamic_type: bool is_nullable: false - name: is_causal + name: return_debug_mask type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool) + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, double, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -151992,16 +159871,36 @@ name: value type: const at::Tensor & - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: attn_bias + type: const ::std::optional & + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + default: false dynamic_type: bool is_nullable: false - name: compute_log_sumexp + name: is_causal type: bool - annotation: null default: false dynamic_type: bool is_nullable: false - name: is_causal + name: return_debug_mask type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -152009,10 +159908,40 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: result0 + field_name: output + name: output type: at::Tensor - dynamic_type: at::Tensor - name: result1 + field_name: logsumexp + name: 
logsumexp + type: at::Tensor + - dynamic_type: at::Tensor + field_name: cum_seq_q + name: cum_seq_q + type: at::Tensor + - dynamic_type: at::Tensor + field_name: cum_seq_k + name: cum_seq_k + type: at::Tensor + - dynamic_type: int64_t + field_name: max_q + name: max_q + type: c10::SymInt + - dynamic_type: int64_t + field_name: max_k + name: max_k + type: c10::SymInt + - dynamic_type: at::Tensor + field_name: philox_seed + name: philox_seed + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_offset + name: philox_offset + type: at::Tensor + - dynamic_type: at::Tensor + field_name: debug_attn_mask + name: debug_attn_mask type: at::Tensor inplace: false is_factory_method: false @@ -152021,17 +159950,17 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _scaled_dot_product_efficient_attention_backward - operator_name: _scaled_dot_product_efficient_attention_backward +- name: _scaled_dot_product_flash_attention_backward + operator_name: _scaled_dot_product_flash_attention_backward overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor) + schema_string: aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value) arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: grad_out_ + name: grad_out type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -152059,23 +159988,58 @@ name: logsumexp type: const at::Tensor & - annotation: null - default: false + dynamic_type: at::Tensor + is_nullable: false + name: cum_seq_q + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: cum_seq_k + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_q + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_k + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null dynamic_type: bool is_nullable: false name: is_causal type: bool - annotation: null - default: false - dynamic_type: bool + dynamic_type: at::Tensor is_nullable: false - name: chunk_grad_outputs - type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool) + name: philox_seed + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: philox_offset + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double, bool, const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null 
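
Three coordinated fixes run through the flash-attention entries here: the misspelled ouput return field becomes output, max_q/max_k become SymInts, and the philox RNG state crosses the forward/backward boundary as Tensors instead of raw int64 seeds, which lets that state live on-device. A sketch of threading the state through, assuming the forward/backward pair keeps the C++ signatures shown in these hunks (the fused kernel's backend and dtype constraints, e.g. CUDA with fp16/bf16 inputs, are not checked here):

#include <ATen/ATen.h>
#include <optional>
#include <tuple>

// Forward returns a 9-tuple; slots 6 and 7 (philox_seed/philox_offset) are
// at::Tensor after this change and are fed back into the backward verbatim.
std::tuple<at::Tensor, at::Tensor, at::Tensor> flash_attention_roundtrip(
    const at::Tensor& q, const at::Tensor& k, const at::Tensor& v,
    const at::Tensor& grad_out) {
  auto fwd = at::_scaled_dot_product_flash_attention(
      q, k, v, /*dropout_p=*/0.0, /*is_causal=*/false,
      /*return_debug_mask=*/false, /*scale=*/::std::nullopt);
  const auto& output        = std::get<0>(fwd);
  const auto& logsumexp     = std::get<1>(fwd);
  const auto& cum_seq_q     = std::get<2>(fwd);
  const auto& cum_seq_k     = std::get<3>(fwd);
  const auto& max_q         = std::get<4>(fwd);  // c10::SymInt, no longer int64_t
  const auto& max_k         = std::get<5>(fwd);
  const auto& philox_seed   = std::get<6>(fwd);  // at::Tensor, no longer int64_t
  const auto& philox_offset = std::get<7>(fwd);
  // The non-SymInt wrapper of the backward still takes int64_t max lengths,
  // per the schema_order_cpp_signature above, so the SymInts are guarded.
  return at::_scaled_dot_product_flash_attention_backward(
      grad_out, q, k, v, output, logsumexp, cum_seq_q, cum_seq_k,
      max_q.guard_int(__FILE__, __LINE__), max_k.guard_int(__FILE__, __LINE__),
      /*dropout_p=*/0.0, /*is_causal=*/false, philox_seed, philox_offset,
      /*scale=*/::std::nullopt);
}
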
dynamic_type: at::Tensor is_nullable: false - name: grad_out_ + name: grad_out type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -152102,100 +160066,71 @@ is_nullable: false name: logsumexp type: const at::Tensor & - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: is_causal - type: bool - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: chunk_grad_outputs - type: bool - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: result0 - type: at::Tensor - - dynamic_type: at::Tensor - name: result1 - type: at::Tensor - - dynamic_type: at::Tensor - name: result2 - type: at::Tensor - inplace: false - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: _chunk_grad_outputs_efficient_attention - operator_name: _chunk_grad_outputs_efficient_attention - overload_name: '' - manual_kernel_registration: false - category_override: '' - schema_string: aten::_chunk_grad_outputs_efficient_attention(Tensor query, Tensor key, Tensor value, bool is_causal=False) -> bool - arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: query + name: cum_seq_q type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: key + name: cum_seq_k type: const at::Tensor & - annotation: null - dynamic_type: at::Tensor + dynamic_type: int64_t is_nullable: false - name: value - type: const at::Tensor & + name: max_q + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_k + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double - annotation: null - default: false dynamic_type: bool is_nullable: false name: is_causal type: bool - schema_order_cpp_signature: bool (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: query - type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: key + name: philox_seed type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: value + name: philox_offset type: const at::Tensor & - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: is_causal - type: bool + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace mode: native python_module: '' returns: - - dynamic_type: bool - name: result - type: bool + - dynamic_type: at::Tensor + field_name: grad_query + name: grad_query + type: at::Tensor + - dynamic_type: at::Tensor + field_name: grad_key + name: grad_key + type: at::Tensor + - dynamic_type: at::Tensor + field_name: grad_value + name: grad_value + type: at::Tensor inplace: false is_factory_method: false abstract: true @@ -152203,13 +160138,18 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _flash_attention_forward - operator_name: _flash_attention_forward +- name: _scaled_dot_product_flash_attention_for_cpu_backward + operator_name: _scaled_dot_product_flash_attention_for_cpu_backward overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, 
Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, bool return_debug_mask) -> (Tensor output, Tensor softmax_logsumexp, int philox_seed, int philox_offset, Tensor debug_attn_mask) + schema_string: aten::_scaled_dot_product_flash_attention_for_cpu_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, float dropout_p, bool is_causal, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value) arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152228,23 +160168,13 @@ - annotation: null dynamic_type: at::Tensor is_nullable: false - name: cum_seq_q + name: out type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: cum_seq_k + name: logsumexp type: const at::Tensor & - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_q - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_k - type: int64_t - annotation: null dynamic_type: double is_nullable: false @@ -152256,12 +160186,26 @@ name: is_causal type: bool - annotation: null - dynamic_type: bool - is_nullable: false - name: return_debug_mask - type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double, bool, bool) + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: attn_mask + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, double, bool, const ::std::optional &, ::std::optional) schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152280,23 +160224,13 @@ - annotation: null dynamic_type: at::Tensor is_nullable: false - name: cum_seq_q + name: out type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: cum_seq_k + name: logsumexp type: const at::Tensor & - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_q - type: int64_t - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: max_k - type: int64_t - annotation: null dynamic_type: double is_nullable: false @@ -152308,10 +160242,19 @@ name: is_causal type: bool - annotation: null - dynamic_type: bool - is_nullable: false - name: return_debug_mask - type: bool + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: attn_mask + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -152319,24 +160262,16 @@ python_module: '' returns: - dynamic_type: at::Tensor - field_name: output - name: output + field_name: grad_query + name: grad_query type: at::Tensor - dynamic_type: at::Tensor - field_name: softmax_logsumexp - name: softmax_logsumexp + field_name: grad_key + name: grad_key type: at::Tensor - 
- dynamic_type: int64_t - field_name: philox_seed - name: philox_seed - type: int64_t - - dynamic_type: int64_t - field_name: philox_offset - name: philox_offset - type: int64_t - dynamic_type: at::Tensor - field_name: debug_attn_mask - name: debug_attn_mask + field_name: grad_value + name: grad_value type: at::Tensor inplace: false is_factory_method: false @@ -152345,12 +160280,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _flash_attention_backward - operator_name: _flash_attention_backward +- name: _scaled_dot_product_fused_attention_overrideable_backward + operator_name: _scaled_dot_product_fused_attention_overrideable_backward overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor, Tensor, Tensor) + schema_string: aten::_scaled_dot_product_fused_attention_overrideable_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor attn_bias, bool[4] grad_input_mask, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value, Tensor grad_attn_bias) arguments: - annotation: null dynamic_type: at::Tensor @@ -152372,6 +160307,16 @@ is_nullable: false name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: attn_bias + type: const at::Tensor & + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: grad_input_mask + type: ::std::array - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152413,16 +160358,23 @@ name: is_causal type: bool - annotation: null - dynamic_type: int64_t + dynamic_type: at::Tensor is_nullable: false name: philox_seed - type: int64_t + type: const at::Tensor & - annotation: null - dynamic_type: int64_t + dynamic_type: at::Tensor is_nullable: false name: philox_offset - type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double, bool, int64_t, int64_t) + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::array, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double, bool, const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -152444,6 +160396,16 @@ is_nullable: false name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: attn_bias + type: const at::Tensor & + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: grad_input_mask + type: ::std::array - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152485,15 +160447,22 @@ name: is_causal type: bool - annotation: null - dynamic_type: int64_t + 
dynamic_type: at::Tensor is_nullable: false name: philox_seed - type: int64_t + type: const at::Tensor & - annotation: null - dynamic_type: int64_t + dynamic_type: at::Tensor is_nullable: false name: philox_offset - type: int64_t + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -152501,13 +160470,20 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: result0 + field_name: grad_query + name: grad_query type: at::Tensor - dynamic_type: at::Tensor - name: result1 + field_name: grad_key + name: grad_key type: at::Tensor - dynamic_type: at::Tensor - name: result2 + field_name: grad_value + name: grad_value + type: at::Tensor + - dynamic_type: at::Tensor + field_name: grad_attn_bias + name: grad_attn_bias type: at::Tensor inplace: false is_factory_method: false @@ -152516,12 +160492,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _efficient_attention_forward - operator_name: _efficient_attention_forward +- name: _scaled_dot_product_efficient_attention + operator_name: _scaled_dot_product_efficient_attention overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, bool compute_log_sumexp=False, bool causal=False) -> (Tensor, Tensor) + schema_string: aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset) arguments: - annotation: null dynamic_type: at::Tensor @@ -152541,31 +160517,33 @@ - annotation: null dynamic_type: at::Tensor is_nullable: true - name: cu_seqlens_q - type: const c10::optional & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: cu_seqlens_k - type: const c10::optional & - - annotation: null - dynamic_type: int64_t - is_nullable: true - name: max_seqlen_q - type: c10::optional + name: attn_bias + type: const ::std::optional & - annotation: null - default: false dynamic_type: bool is_nullable: false name: compute_log_sumexp type: bool + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: dropout_p + type: double - annotation: null default: false dynamic_type: bool is_nullable: false - name: causal + name: is_causal type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, c10::optional, bool, bool) + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, bool, double, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -152585,30 +160563,32 @@ - annotation: null dynamic_type: at::Tensor is_nullable: true - name: cu_seqlens_q - type: const c10::optional & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: cu_seqlens_k - type: const c10::optional & - - annotation: null - dynamic_type: int64_t - is_nullable: true - name: max_seqlen_q - type: c10::optional + name: 
attn_bias + type: const ::std::optional & - annotation: null - default: false dynamic_type: bool is_nullable: false name: compute_log_sumexp type: bool + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: dropout_p + type: double - annotation: null default: false dynamic_type: bool is_nullable: false - name: causal + name: is_causal type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -152616,10 +160596,20 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: result0 + field_name: output + name: output type: at::Tensor - dynamic_type: at::Tensor - name: result1 + field_name: log_sumexp + name: log_sumexp + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_seed + name: philox_seed + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_offset + name: philox_offset type: at::Tensor inplace: false is_factory_method: false @@ -152628,12 +160618,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _efficient_attention_backward - operator_name: _efficient_attention_backward +- name: _scaled_dot_product_efficient_attention_backward + operator_name: _scaled_dot_product_efficient_attention_backward overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor) + schema_string: aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor attn_bias, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, float dropout_p, bool[4] grad_input_mask, bool is_causal=False, *, float? 
scale=None) -> (Tensor, Tensor, Tensor, Tensor) arguments: - annotation: null dynamic_type: at::Tensor @@ -152655,6 +160645,11 @@ is_nullable: false name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: attn_bias + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152666,18 +160661,39 @@ name: logsumexp type: const at::Tensor & - annotation: null - default: false - dynamic_type: bool + dynamic_type: at::Tensor is_nullable: false - name: is_causal - type: bool + name: philox_seed + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: philox_offset + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: grad_input_mask + type: ::std::array - annotation: null default: false dynamic_type: bool is_nullable: false - name: chunk_grad_outputs + name: is_causal type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool) + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, double, ::std::array, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -152699,6 +160715,11 @@ is_nullable: false name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: attn_bias + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152710,17 +160731,38 @@ name: logsumexp type: const at::Tensor & - annotation: null - default: false - dynamic_type: bool + dynamic_type: at::Tensor is_nullable: false - name: is_causal - type: bool + name: philox_seed + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: philox_offset + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: grad_input_mask + type: ::std::array - annotation: null default: false dynamic_type: bool is_nullable: false - name: chunk_grad_outputs + name: is_causal type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -152736,6 +160778,9 @@ - dynamic_type: at::Tensor name: result2 type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor inplace: false is_factory_method: false abstract: true @@ -152743,57 +160788,115 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _triton_scaled_dot_attention - operator_name: _triton_scaled_dot_attention +- name: _scaled_dot_product_cudnn_attention + operator_name: _scaled_dot_product_cudnn_attention overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor + schema_string: 
aten::_scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask) arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: q + name: query type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: k + name: key type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: v + name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: attn_bias + type: const ::std::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: compute_log_sumexp + type: bool - annotation: null default: 0.0 dynamic_type: double is_nullable: false name: dropout_p type: double - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double) + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: is_causal + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_debug_mask + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, bool, double, bool, bool, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: q + name: query type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: k + name: key type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: v + name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: attn_bias + type: const ::std::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: compute_log_sumexp + type: bool - annotation: null default: 0.0 dynamic_type: double is_nullable: false name: dropout_p type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: is_causal + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_debug_mask + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -152801,7 +160904,40 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: result + field_name: output + name: output + type: at::Tensor + - dynamic_type: at::Tensor + field_name: logsumexp + name: logsumexp + type: at::Tensor + - dynamic_type: at::Tensor + field_name: cum_seq_q + name: cum_seq_q + type: at::Tensor + - dynamic_type: at::Tensor + field_name: cum_seq_k + name: cum_seq_k + type: at::Tensor + - dynamic_type: int64_t + field_name: max_q + name: max_q + type: c10::SymInt + - dynamic_type: int64_t + field_name: max_k + name: max_k + type: c10::SymInt + - dynamic_type: at::Tensor + field_name: philox_seed + name: philox_seed + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_offset + name: philox_offset 
+ type: at::Tensor + - dynamic_type: at::Tensor + field_name: debug_attn_mask + name: debug_attn_mask type: at::Tensor inplace: false is_factory_method: false @@ -152810,13 +160946,18 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _triton_multi_head_attention - operator_name: _triton_multi_head_attention +- name: _scaled_dot_product_cudnn_attention_backward + operator_name: _scaled_dot_product_cudnn_attention_backward overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor + schema_string: aten::_scaled_dot_product_cudnn_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor attn_bias, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, *, float? scale=None) -> (Tensor, Tensor, Tensor) arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152833,43 +160974,74 @@ name: value type: const at::Tensor & - annotation: null - dynamic_type: int64_t + dynamic_type: at::Tensor is_nullable: false - name: embed_dim - type: int64_t + name: out + type: const at::Tensor & - annotation: null - dynamic_type: int64_t + dynamic_type: at::Tensor is_nullable: false - name: num_head - type: int64_t + name: logsumexp + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_weight + name: philox_seed type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_bias + name: philox_offset type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_weight + name: attn_bias type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_bias + name: cum_seq_q type: const at::Tensor & - annotation: null - default: '{}' dynamic_type: at::Tensor + is_nullable: false + name: cum_seq_k + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_q + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_k + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: is_causal + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double is_nullable: true - name: mask - type: const c10::optional & - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &) + kwarg_only: true + name: scale + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double, bool, ::std::optional) schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const 
at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -152886,41 +161058,67 @@ name: value type: const at::Tensor & - annotation: null - dynamic_type: int64_t + dynamic_type: at::Tensor is_nullable: false - name: embed_dim - type: int64_t + name: out + type: const at::Tensor & - annotation: null - dynamic_type: int64_t + dynamic_type: at::Tensor is_nullable: false - name: num_head - type: int64_t + name: logsumexp + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_weight + name: philox_seed type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_bias + name: philox_offset type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_weight + name: attn_bias type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_bias + name: cum_seq_q type: const at::Tensor & - annotation: null - default: '{}' dynamic_type: at::Tensor + is_nullable: false + name: cum_seq_k + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_q + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_k + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: is_causal + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double is_nullable: true - name: mask - type: const c10::optional & + kwarg_only: true + name: scale + type: ::std::optional method_of: - Type - namespace @@ -152928,7 +161126,13 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: result + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 type: at::Tensor inplace: false is_factory_method: false @@ -152937,315 +161141,409 @@ with_gil: false deprecated: false has_math_kernel: false -- name: special_airy_ai - operator_name: special_airy_ai +- name: _flash_attention_forward + operator_name: _flash_attention_forward overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::special_airy_ai(Tensor x) -> Tensor + schema_string: aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, bool return_debug_mask, *, float? scale=None, SymInt? window_size_left=None, SymInt? window_size_right=None, Tensor? seqused_k=None, Tensor? 
alibi_slopes=None) -> (Tensor output, Tensor softmax_logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask) arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: x + name: query type: const at::Tensor & - schema_order_cpp_signature: at::Tensor (const at::Tensor &) - schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: x + name: key type: const at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: special - returns: - - dynamic_type: at::Tensor - name: result - type: at::Tensor - inplace: false - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: special_airy_ai_out - operator_name: special_airy_ai - overload_name: out - manual_kernel_registration: false - category_override: '' - schema_string: aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) - arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: x + name: value type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) - schema_order_arguments: - annotation: null dynamic_type: at::Tensor - is_nullable: false - name: x - type: const at::Tensor & - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: special - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false - is_factory_method: false - abstract: true - device_guard: true - with_gil: false - deprecated: false - has_math_kernel: false -- name: _transformer_decoder_only_layer_fwd - operator_name: _transformer_decoder_only_layer_fwd - overload_name: '' - manual_kernel_registration: false - category_override: '' - schema_string: aten::_transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? 
incr_value=None) -> (Tensor, Tensor, Tensor) - arguments: + is_nullable: true + name: cum_seq_q + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor - is_nullable: false - name: src - type: const at::Tensor & + is_nullable: true + name: cum_seq_k + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false - name: embed_dim + name: max_q type: int64_t - annotation: null dynamic_type: int64_t is_nullable: false - name: num_heads + name: max_k type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: is_causal + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: return_debug_mask + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size_left + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size_right + type: ::std::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: seqused_k + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: alibi_slopes + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, double, bool, bool, ::std::optional, ::std::optional, ::std::optional, const ::std::optional &, const ::std::optional &) + schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_weight + name: query type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_bias + name: key type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_weight + name: value type: const at::Tensor & - annotation: null dynamic_type: at::Tensor + is_nullable: true + name: cum_seq_q + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cum_seq_k + type: const ::std::optional & + - annotation: null + dynamic_type: int64_t is_nullable: false - name: proj_bias - type: const at::Tensor & + name: max_q + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_k + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double - annotation: null dynamic_type: bool is_nullable: false - name: use_gelu + name: is_causal type: bool - annotation: null dynamic_type: bool is_nullable: false - name: norm_first + name: return_debug_mask type: bool - annotation: null + default: ::std::nullopt dynamic_type: double - is_nullable: false - name: eps - type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size_left + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size_right + type: ::std::optional - 
annotation: null + default: '{}' dynamic_type: at::Tensor - is_nullable: false - name: norm_weight_1 - type: const at::Tensor & + is_nullable: true + kwarg_only: true + name: seqused_k + type: const ::std::optional & - annotation: null + default: '{}' dynamic_type: at::Tensor - is_nullable: false - name: norm_bias_1 - type: const at::Tensor & + is_nullable: true + kwarg_only: true + name: alibi_slopes + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: output + name: output + type: at::Tensor + - dynamic_type: at::Tensor + field_name: softmax_logsumexp + name: softmax_logsumexp + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_seed + name: philox_seed + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_offset + name: philox_offset + type: at::Tensor + - dynamic_type: at::Tensor + field_name: debug_attn_mask + name: debug_attn_mask + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _flash_attention_backward + operator_name: _flash_attention_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None, SymInt? window_size_left=None, SymInt? window_size_right=None) -> (Tensor, Tensor, Tensor) + arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: norm_weight_2 + name: grad_out type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: norm_bias_2 + name: query type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_weight_1 + name: key type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_bias_1 + name: value type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_weight_2 + name: out type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_bias_2 + name: logsumexp type: const at::Tensor & - annotation: null - default: '{}' - dynamic_type: at::Tensor - is_nullable: true - name: mask - type: const c10::optional & - - annotation: null - default: '{}' - dynamic_type: at::Tensor - is_nullable: true - name: incr_key - type: const c10::optional & - - annotation: null - default: '{}' dynamic_type: at::Tensor - is_nullable: true - name: incr_value - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, double, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &) - schema_order_arguments: + is_nullable: false + name: cum_seq_q + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: src + name: cum_seq_k type: const at::Tensor & - annotation: null dynamic_type: int64_t is_nullable: false - name: embed_dim + name: max_q type: 
int64_t - annotation: null dynamic_type: int64_t is_nullable: false - name: num_heads + name: max_k type: int64_t - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: qkv_weight - type: const at::Tensor & + name: dropout_p + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: qkv_bias - type: const at::Tensor & + name: is_causal + type: bool - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_weight + name: philox_seed type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_bias + name: philox_offset type: const at::Tensor & - annotation: null - dynamic_type: bool - is_nullable: false - name: use_gelu - type: bool + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional - annotation: null - dynamic_type: bool - is_nullable: false - name: norm_first - type: bool + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size_left + type: ::std::optional - annotation: null - dynamic_type: double + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size_right + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double, bool, const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, ::std::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor is_nullable: false - name: eps - type: double + name: grad_out + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: norm_weight_1 + name: query type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: norm_bias_1 + name: key type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: norm_weight_2 + name: value type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: norm_bias_2 + name: out type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_weight_1 + name: logsumexp type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_bias_1 + name: cum_seq_q type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_weight_2 + name: cum_seq_k type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_q + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_k + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: is_causal + type: bool - annotation: null dynamic_type: at::Tensor is_nullable: false - name: ffn_bias_2 + name: philox_seed type: const at::Tensor & - annotation: null - default: '{}' dynamic_type: at::Tensor + is_nullable: false + name: philox_offset + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: double is_nullable: true - name: mask - type: const c10::optional & + kwarg_only: true + name: scale + type: ::std::optional - annotation: null - default: '{}' - 
dynamic_type: at::Tensor + default: ::std::nullopt + dynamic_type: int64_t is_nullable: true - name: incr_key - type: const c10::optional & + kwarg_only: true + name: window_size_left + type: ::std::optional - annotation: null - default: '{}' - dynamic_type: at::Tensor + default: ::std::nullopt + dynamic_type: int64_t is_nullable: true - name: incr_value - type: const c10::optional & + kwarg_only: true + name: window_size_right + type: ::std::optional method_of: - Type - namespace @@ -153268,12 +161566,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _native_decoder_only_multi_head_attention - operator_name: _native_decoder_only_multi_head_attention +- name: _efficient_attention_forward + operator_name: _efficient_attention_forward overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_native_decoder_only_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True) -> (Tensor, Tensor, Tensor, Tensor) + schema_string: aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt? max_seqlen_q, SymInt? max_seqlen_k, float dropout_p, int custom_mask_type, bool compute_log_sumexp=False, *, float? scale=None, Tensor? seqlen_k=None, int? window_size=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, SymInt max_seqlen_batch_q, SymInt max_seqlen_batch_k) arguments: - annotation: null dynamic_type: at::Tensor @@ -153290,68 +161588,196 @@ is_nullable: false name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cu_seqlens_q + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cu_seqlens_k + type: const ::std::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: max_seqlen_q + type: ::std::optional - annotation: null dynamic_type: int64_t + is_nullable: true + name: max_seqlen_k + type: ::std::optional + - annotation: null + dynamic_type: double is_nullable: false - name: embed_dim - type: int64_t + name: dropout_p + type: double - annotation: null dynamic_type: int64_t is_nullable: false - name: num_head + name: custom_mask_type type: int64_t - annotation: null - dynamic_type: at::Tensor + default: false + dynamic_type: bool is_nullable: false - name: qkv_weight - type: const at::Tensor & + name: compute_log_sumexp + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: seqlen_k + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, ::std::optional, ::std::optional, double, int64_t, bool, ::std::optional, const ::std::optional &, 
::std::optional) + schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_bias + name: query type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_weight + name: key type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_bias + name: value type: const at::Tensor & - annotation: null - default: '{}' dynamic_type: at::Tensor is_nullable: true - name: mask - type: const c10::optional & + name: bias + type: const ::std::optional & - annotation: null - default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_key - type: const c10::optional & + name: cu_seqlens_q + type: const ::std::optional & - annotation: null - default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_value - type: const c10::optional & + name: cu_seqlens_k + type: const ::std::optional & - annotation: null - default: true - dynamic_type: bool + dynamic_type: int64_t + is_nullable: true + name: max_seqlen_q + type: ::std::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: max_seqlen_k + type: ::std::optional + - annotation: null + dynamic_type: double is_nullable: false - name: need_weights - type: bool + name: dropout_p + type: double - annotation: null - default: true + dynamic_type: int64_t + is_nullable: false + name: custom_mask_type + type: int64_t + - annotation: null + default: false dynamic_type: bool is_nullable: false - name: average_attn_weights + name: compute_log_sumexp type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, bool) - schema_order_arguments: + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: seqlen_k + type: const ::std::optional & + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size + type: ::std::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: output + name: output + type: at::Tensor + - dynamic_type: at::Tensor + field_name: logsumexp + name: logsumexp + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_seed + name: philox_seed + type: at::Tensor + - dynamic_type: at::Tensor + field_name: philox_offset + name: philox_offset + type: at::Tensor + - dynamic_type: int64_t + field_name: max_seqlen_batch_q + name: max_seqlen_batch_q + type: c10::SymInt + - dynamic_type: int64_t + field_name: max_seqlen_batch_k + name: max_seqlen_batch_k + type: c10::SymInt + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _efficient_attention_backward + operator_name: _efficient_attention_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor out, Tensor? cu_seqlens_q, Tensor? 
cu_seqlens_k, SymInt max_seqlen_q, SymInt max_seqlen_k, Tensor logsumexp, float dropout_p, Tensor philox_seed, Tensor philox_offset, int custom_mask_type, bool bias_requires_grad, *, float? scale=None, int? num_splits_key=None, int? window_size=None, bool shared_storage_dqdkdv=False) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out_ + type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -153367,65 +161793,203 @@ is_nullable: false name: value type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cu_seqlens_q + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cu_seqlens_k + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false - name: embed_dim + name: max_seqlen_q type: int64_t - annotation: null dynamic_type: int64_t is_nullable: false - name: num_head + name: max_seqlen_k type: int64_t - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_weight + name: logsumexp type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double - annotation: null dynamic_type: at::Tensor is_nullable: false - name: qkv_bias + name: philox_seed type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_weight + name: philox_offset + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: custom_mask_type + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: bias_requires_grad + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: num_splits_key + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size + type: ::std::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: shared_storage_dqdkdv + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, const at::Tensor &, double, const at::Tensor &, const at::Tensor &, int64_t, bool, ::std::optional, ::std::optional, ::std::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out_ type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_bias + name: query + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: key + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value type: const at::Tensor & - annotation: null - default: '{}' dynamic_type: at::Tensor is_nullable: true - name: mask - type: const c10::optional & + 
name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: out + type: const at::Tensor & - annotation: null - default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_key - type: const c10::optional & + name: cu_seqlens_q + type: const ::std::optional & - annotation: null - default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_value - type: const c10::optional & + name: cu_seqlens_k + type: const ::std::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_seqlen_q + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_seqlen_k + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: logsumexp + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: philox_seed + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: philox_offset + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: custom_mask_type + type: int64_t - annotation: null - default: true dynamic_type: bool is_nullable: false - name: need_weights + name: bias_requires_grad type: bool - annotation: null - default: true + default: ::std::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: scale + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: num_splits_key + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: window_size + type: ::std::optional + - annotation: null + default: false dynamic_type: bool is_nullable: false - name: average_attn_weights + kwarg_only: true + name: shared_storage_dqdkdv type: bool method_of: - Type @@ -153452,6 +162016,349 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _triton_scaled_dot_attention + operator_name: _triton_scaled_dot_attention + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: q + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: k + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: v + type: const at::Tensor & + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: q + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: k + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: v + type: const at::Tensor & + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: dropout_p + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result 
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _fill_mem_eff_dropout_mask_
+  operator_name: _fill_mem_eff_dropout_mask_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_fill_mem_eff_dropout_mask_(Tensor(a!) self, float dropout_p, int seed, int offset) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout_p
+    type: double
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: seed
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: offset
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, int64_t, int64_t)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout_p
+    type: double
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: seed
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: offset
+    type: int64_t
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _triton_multi_head_attention
+  operator_name: _triton_multi_head_attention
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: query
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: key
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: value
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: embed_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_head
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: qkv_weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: qkv_bias
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: proj_weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: proj_bias
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: mask
+    type: const ::std::optional &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: query
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: key
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: value
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: embed_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_head
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: qkv_weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: qkv_bias
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: proj_weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: proj_bias
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: mask
+    type: const ::std::optional &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: special_airy_ai
+  operator_name: special_airy_ai
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::special_airy_ai(Tensor x) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: special
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: special_airy_ai_out
+  operator_name: special_airy_ai
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x
+    type: const at::Tensor &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: special
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
 - name: special_bessel_j0
   operator_name: special_bessel_j0
   overload_name: ''
@@ -153873,11 +162780,11 @@
     type: at::Tensor
   inplace: false
   is_factory_method: false
-  abstract: false
+  abstract: true
   device_guard: true
   with_gil: false
   deprecated: false
-  has_math_kernel: true
+  has_math_kernel: false
 - name: special_chebyshev_polynomial_t
   operator_name: special_chebyshev_polynomial_t
   overload_name: n_scalar
@@ -153918,11 +162825,11 @@
     type: at::Tensor
   inplace: false
   is_factory_method: false
-  abstract: false
+  abstract: true
   device_guard: true
   with_gil: false
   deprecated: false
-  has_math_kernel: true
+  has_math_kernel: false
 - name: special_chebyshev_polynomial_t_out
   operator_name: special_chebyshev_polynomial_t
   overload_name: out
@@ -154036,11 +162943,11 @@
     type: at::Tensor &
   inplace: false
   is_factory_method: false
-  abstract: false
+  abstract: true
   device_guard: true
   with_gil: false
   deprecated: false
-  has_math_kernel: true
+  has_math_kernel: false
 - name: special_chebyshev_polynomial_t_out
   operator_name: special_chebyshev_polynomial_t
   overload_name: n_scalar_out
@@ -154185,11 +163092,11 @@
     type: at::Tensor
   inplace: false
   is_factory_method: false
-  abstract: false
+  abstract: true
   device_guard: true
   with_gil: false
   deprecated: false
-  has_math_kernel: true
+  has_math_kernel: false
 - name: special_chebyshev_polynomial_u
   operator_name: special_chebyshev_polynomial_u
   overload_name: n_scalar
@@ -154230,11 +163137,11 @@
     type: at::Tensor
   inplace: false
   is_factory_method: false
-  abstract: false
+  abstract: true
   device_guard: true
   with_gil: false
   deprecated: false
-  has_math_kernel: true
+  has_math_kernel: false
 - name: special_chebyshev_polynomial_u_out
   operator_name: special_chebyshev_polynomial_u
   overload_name: out
@@ -154348,11 +163255,11 @@
     type: at::Tensor &
   inplace: false
   is_factory_method: false
-  abstract: false
+  abstract: true
   device_guard: true
   with_gil: false
   deprecated: false
-  has_math_kernel: true
+  has_math_kernel: false
 - name: special_chebyshev_polynomial_u_out
   operator_name: special_chebyshev_polynomial_u
   overload_name: n_scalar_out
@@ -154497,11 +163404,11 @@
     type: at::Tensor
   inplace: false
   is_factory_method: false
-  abstract: false
+  abstract: true
   device_guard: true
   with_gil: false
   deprecated: false
-  has_math_kernel: true
+  has_math_kernel: false
 - name: special_chebyshev_polynomial_v
   operator_name:
special_chebyshev_polynomial_v overload_name: n_scalar @@ -154542,11 +163449,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_v_out operator_name: special_chebyshev_polynomial_v overload_name: out @@ -154660,11 +163567,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_v_out operator_name: special_chebyshev_polynomial_v overload_name: n_scalar_out @@ -154809,11 +163716,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_w operator_name: special_chebyshev_polynomial_w overload_name: n_scalar @@ -154854,11 +163761,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_w_out operator_name: special_chebyshev_polynomial_w overload_name: out @@ -154972,11 +163879,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_chebyshev_polynomial_w_out operator_name: special_chebyshev_polynomial_w overload_name: n_scalar_out @@ -155121,11 +164028,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_hermite_polynomial_h operator_name: special_hermite_polynomial_h overload_name: n_scalar @@ -155166,11 +164073,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_hermite_polynomial_h_out operator_name: special_hermite_polynomial_h overload_name: out @@ -155284,11 +164191,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_hermite_polynomial_h_out operator_name: special_hermite_polynomial_h overload_name: n_scalar_out @@ -155433,11 +164340,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_hermite_polynomial_he operator_name: special_hermite_polynomial_he overload_name: n_scalar @@ -155478,11 +164385,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_hermite_polynomial_he_out operator_name: special_hermite_polynomial_he overload_name: out @@ -155596,11 +164503,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - 
has_math_kernel: true + has_math_kernel: false - name: special_hermite_polynomial_he_out operator_name: special_hermite_polynomial_he overload_name: n_scalar_out @@ -155745,11 +164652,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_laguerre_polynomial_l operator_name: special_laguerre_polynomial_l overload_name: n_scalar @@ -155790,11 +164697,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_laguerre_polynomial_l_out operator_name: special_laguerre_polynomial_l overload_name: out @@ -155908,11 +164815,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_laguerre_polynomial_l_out operator_name: special_laguerre_polynomial_l overload_name: n_scalar_out @@ -156057,11 +164964,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_legendre_polynomial_p operator_name: special_legendre_polynomial_p overload_name: n_scalar @@ -156102,11 +165009,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_legendre_polynomial_p_out operator_name: special_legendre_polynomial_p overload_name: out @@ -156220,11 +165127,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_legendre_polynomial_p_out operator_name: special_legendre_polynomial_p overload_name: n_scalar_out @@ -156873,11 +165780,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_t operator_name: special_shifted_chebyshev_polynomial_t overload_name: n_scalar @@ -156918,11 +165825,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_t_out operator_name: special_shifted_chebyshev_polynomial_t overload_name: out @@ -157036,11 +165943,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_t_out operator_name: special_shifted_chebyshev_polynomial_t overload_name: n_scalar_out @@ -157185,11 +166092,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_u operator_name: special_shifted_chebyshev_polynomial_u overload_name: n_scalar @@ 
-157230,11 +166137,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_u_out operator_name: special_shifted_chebyshev_polynomial_u overload_name: out @@ -157348,11 +166255,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_u_out operator_name: special_shifted_chebyshev_polynomial_u overload_name: n_scalar_out @@ -157497,11 +166404,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_v operator_name: special_shifted_chebyshev_polynomial_v overload_name: n_scalar @@ -157542,11 +166449,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_v_out operator_name: special_shifted_chebyshev_polynomial_v overload_name: out @@ -157660,11 +166567,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_v_out operator_name: special_shifted_chebyshev_polynomial_v overload_name: n_scalar_out @@ -157809,11 +166716,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_w operator_name: special_shifted_chebyshev_polynomial_w overload_name: n_scalar @@ -157854,11 +166761,11 @@ type: at::Tensor inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_w_out operator_name: special_shifted_chebyshev_polynomial_w overload_name: out @@ -157972,11 +166879,11 @@ type: at::Tensor & inplace: false is_factory_method: false - abstract: false + abstract: true device_guard: true with_gil: false deprecated: false - has_math_kernel: true + has_math_kernel: false - name: special_shifted_chebyshev_polynomial_w_out operator_name: special_shifted_chebyshev_polynomial_w overload_name: n_scalar_out @@ -158278,15 +167185,15 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional &, 
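[Editorial aside, not part of the patch: every `special_*_polynomial_*` entry in the hunks above (Hermite, Laguerre, Legendre, shifted Chebyshev T/U/V/W) flips to `abstract: true` and `has_math_kernel: false`, i.e. the newer libtorch these declarations were dumped from no longer ships a composite ("math") fallback for these ops, and each call must resolve to a real backend kernel. The call surface is unchanged; a minimal illustrative sketch, not taken from this diff:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  // Evaluate the Legendre polynomial P_3 at a few points through the
  // n_scalar overload declared above. With has_math_kernel: false this
  // dispatches directly to a CPU/CUDA kernel rather than a
  // CompositeImplicitAutograd decomposition.
  at::Tensor x = at::linspace(-1.0, 1.0, 5);
  at::Tensor p = at::special_legendre_polynomial_p(x, /*n=*/3);
  std::cout << p << "\n";
}
]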
const ::std::optional &) schema_order_arguments: - annotation: a! dynamic_type: at::TensorList @@ -158366,14 +167273,208 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adam_ + operator_name: _fused_adam_ + overload_name: tensor_lr + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! 
+ dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -158472,15 +167573,15 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional &, const c10::optional &) + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: a! dynamic_type: at::TensorList @@ -158560,14 +167661,14 @@ is_nullable: true kwarg_only: true name: grad_scale - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: found_inf - type: const c10::optional & + type: const ::std::optional & method_of: - Type - namespace @@ -158581,6 +167682,720 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _fused_adamw_ + operator_name: _fused_adamw_ + overload_name: tensor_lr + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adamw_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! 
+ dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! 
+ dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_sgd_ + operator_name: _fused_sgd_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! 
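[Editorial aside: the `_fused_adam_.tensor_lr` and `_fused_adamw_.tensor_lr` overloads added above differ from the existing declarations in exactly one argument — `lr` is a `Tensor` instead of a `float` — so a scheduler can update the learning rate on device (e.g. under CUDA graph capture) without a host round-trip. A minimal sketch of one fused Adam step through this overload, assuming a libtorch build that ships the fused CPU kernel; all tensor names are illustrative:

#include <ATen/ATen.h>

int main() {
  at::Tensor p    = at::ones({4});       // parameter
  at::Tensor g    = at::full({4}, 0.1);  // gradient
  at::Tensor m    = at::zeros({4});      // exp_avg
  at::Tensor v    = at::zeros({4});      // exp_avg_sq
  at::Tensor step = at::zeros({});       // state_step
  at::Tensor lr   = at::full({}, 1e-3);  // 0-dim learning-rate tensor

  // Argument order follows the schema string above; amsgrad=false, so
  // max_exp_avg_sqs may be an empty TensorList.
  at::_fused_adam_({p}, {g}, {m}, {v}, /*max_exp_avg_sqs=*/at::TensorList{},
                   /*state_steps=*/{step}, /*lr=*/lr,
                   /*beta1=*/0.9, /*beta2=*/0.999, /*weight_decay=*/0.0,
                   /*eps=*/1e-8, /*amsgrad=*/false, /*maximize=*/false);
}
]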
+ dynamic_type: at::TensorList + is_nullable: false + name: momentum_buffer_list + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: dampening + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: nesterov + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: is_first_step + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, bool, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: momentum_buffer_list + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: dampening + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: nesterov + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: is_first_step + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_sgd_ + operator_name: _fused_sgd_ + overload_name: tensor_lr + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: momentum_buffer_list + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: momentum + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: dampening + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: nesterov + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: is_first_step + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, double, double, const at::Tensor &, double, bool, bool, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! 
+ dynamic_type: at::TensorList + is_nullable: false + name: momentum_buffer_list + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: momentum + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: dampening + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: nesterov + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: is_first_step + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adagrad_ + operator_name: _fused_adagrad_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adagrad_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] state_sums, Tensor(d!)[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: state_sums + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! 
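[Editorial aside: `_fused_sgd_` and its `tensor_lr` twin above fuse the classic momentum-SGD update across a whole parameter list, with `is_first_step` telling the kernel to seed the momentum buffers instead of reading them; the same pattern carries through `_fused_adagrad_` here, and `_propagate_xla_data` is added further down as a lazy-tensor hook with a trivial math kernel. For reference, the recurrence the fused SGD kernel implements is the standard torch.optim.SGD update (sketched here, not quoted from this diff; \mu = momentum, \tau = dampening):

\[
b_t =
\begin{cases}
  g_t, & \text{is\_first\_step} \\
  \mu\, b_{t-1} + (1-\tau)\, g_t, & \text{otherwise}
\end{cases}
\qquad
\tilde g_t =
\begin{cases}
  g_t + \mu\, b_t, & \text{nesterov} \\
  b_t, & \text{otherwise}
\end{cases}
\]
\[
p_t = p_{t-1} - \mathrm{lr}\cdot \tilde g_t
\]

with the sign flipped when maximize is true, and weight decay folded in beforehand as g_t <- g_t + weight_decay * p_{t-1}.]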
+ dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: state_sums + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _propagate_xla_data + operator_name: _propagate_xla_data + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_propagate_xla_data(Tensor input, Tensor output) -> () + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + schema_order_cpp_signature: void (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true - name: _new_zeros_with_same_feature_meta_out operator_name: _new_zeros_with_same_feature_meta overload_name: out @@ -158970,7 +168785,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight_buf - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -158980,7 +168795,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -159030,8 +168845,8 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const c10::optional &, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const ::std::optional &, const 
at::Tensor &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -159052,7 +168867,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight_buf - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -159062,7 +168877,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -159112,7 +168927,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! dynamic_type: at::Tensor @@ -159240,7 +169055,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -159250,17 +169065,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -159310,7 +169125,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -159321,7 +169136,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: void (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, const at::Tensor &, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &, at::TensorList) + schema_order_cpp_signature: void (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &, const at::Tensor &, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -159352,7 +169167,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -159362,17 +169177,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -159422,7 +169237,7 @@ dynamic_type: 
at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -159575,12 +169390,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, c10::optional, at::Tensor &, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, ::std::optional, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -159593,11 +169408,11 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -159735,8 +169550,8 @@ dynamic_type: bool is_nullable: true name: train - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, c10::optional, at::Tensor &, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, ::std::optional, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -159752,7 +169567,7 @@ dynamic_type: bool is_nullable: true name: train - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -160051,7 +169866,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -160115,6 +169930,65 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _test_functorch_fallback_out + operator_name: _test_functorch_fallback + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_test_functorch_fallback.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false - name: bartlett_window_out operator_name: bartlett_window overload_name: out @@ -160246,12 +170120,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -160277,7 +170151,7 @@ is_nullable: false name: output_zero_point type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, double, double, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double, double, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -160288,12 +170162,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -160367,13 +170241,13 @@ name: p type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -160386,12 +170260,12 @@ name: p type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
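[Editorial aside: two things interleave in this stretch — `_test_functorch_fallback.out` is a newly declared (test-only) out variant, and every `c10::optional`/`c10::nullopt` in the declarations becomes `::std::optional`/`::std::nullopt`, matching libtorch releases in which `c10::optional` is just an alias for `std::optional`. For C++ generated from this YAML (lantern's layer) the change is source-compatible; a sketch of a call site that compiles under either spelling, using `bincount`'s functional form:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor idx = at::zeros({4}, at::kLong);  // bincount needs an integral dtype
  // `weights` is ::std::optional<at::Tensor> in the new declarations;
  // `{}` (an empty optional) works under both the old and new spelling.
  at::Tensor counts = at::bincount(idx, /*weights=*/{}, /*minlength=*/0);
  std::cout << counts << "\n";
}
]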
dynamic_type: at::Tensor @@ -160433,13 +170307,13 @@ name: p type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -160452,12 +170326,12 @@ name: p type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -160500,13 +170374,13 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -160520,12 +170394,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -160578,20 +170452,20 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: pos_weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t is_nullable: false name: reduction type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -160608,13 +170482,13 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: pos_weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: at::Reduction::Mean dynamic_type: int64_t @@ -160668,14 +170542,14 @@ dynamic_type: at::Tensor is_nullable: true name: weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t is_nullable: false name: minlength type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -160687,7 +170561,7 @@ dynamic_type: at::Tensor is_nullable: true name: weights - type: const c10::optional & + type: const ::std::optional & - annotation: null default: 0 dynamic_type: int64_t @@ 
-160950,7 +170824,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -160973,7 +170847,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -161004,7 +170878,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -161020,7 +170894,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -161079,7 +170953,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + schema_string: aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) arguments: - allocate: true annotation: a! @@ -161262,7 +171136,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -161285,7 +171159,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -161316,7 +171190,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -161332,7 +171206,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -161391,7 +171265,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + schema_string: aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) arguments: - allocate: true annotation: a! @@ -161564,7 +171438,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
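[Editorial aside: the schema-string edits across the convolution family (`convolution.out`, `convolution_backward.out`, `convolution_overrideable.out`, `convolution_backward_overrideable.out`, and `_convolution.out` just below) move `int[]`/`int` arguments to `SymInt[]`/`SymInt`, recording symbolic-shape support for tracing. Note that the concrete C++ surface in this YAML is unchanged (`at::IntArrayRef`/`int64_t`); symbolic callers go through the parallel `*_symint` entry points that codegen emits. A sketch of the unchanged concrete call, with illustrative shapes:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor x = at::randn({1, 3, 8, 8});  // NCHW input
  at::Tensor w = at::randn({4, 3, 3, 3});  // OIHW weight
  // Same IntArrayRef signature as before the SymInt[] schema change.
  at::Tensor y = at::convolution(x, w, /*bias=*/{},
                                 /*stride=*/{1, 1}, /*padding=*/{0, 0},
                                 /*dilation=*/{1, 1}, /*transposed=*/false,
                                 /*output_padding=*/{0, 0}, /*groups=*/1);
  std::cout << y.sizes() << "\n";  // [1, 4, 6, 6]
}
]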
@@ -161587,7 +171461,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -161638,7 +171512,7 @@ is_nullable: false name: allow_tf32 type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, bool, bool, bool, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, bool, bool, bool, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -161654,7 +171528,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -162089,12 +171963,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -162102,11 +171976,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -162357,17 +172231,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -162383,7 +172257,7 @@ is_nullable: false name: epsilon type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -162399,17 +172273,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -162525,22 +172399,22 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: 
at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -162551,7 +172425,7 @@ is_nullable: false name: reserveSpace type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, double, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, double, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -162572,22 +172446,22 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -162641,141 +172515,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: cudnn_convolution_out - operator_name: cudnn_convolution - overload_name: out - manual_kernel_registration: false - category_override: '' - schema_string: aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) - arguments: - - allocate: true - annotation: a! 
- dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: weight - type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: padding - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: stride - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: dilation - type: at::IntArrayRef - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: groups - type: int64_t - - annotation: null - dynamic_type: bool - is_nullable: false - name: benchmark - type: bool - - annotation: null - dynamic_type: bool - is_nullable: false - name: deterministic - type: bool - - annotation: null - dynamic_type: bool - is_nullable: false - name: allow_tf32 - type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool, at::Tensor &) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: weight - type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: padding - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: stride - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: dilation - type: at::IntArrayRef - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: groups - type: int64_t - - annotation: null - dynamic_type: bool - is_nullable: false - name: benchmark - type: bool - - annotation: null - dynamic_type: bool - is_nullable: false - name: deterministic - type: bool - - annotation: null - dynamic_type: bool - is_nullable: false - name: allow_tf32 - type: bool - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false - is_factory_method: false - abstract: true - device_guard: false - with_gil: false - deprecated: false - has_math_kernel: false - name: cudnn_convolution_transpose_out operator_name: cudnn_convolution_transpose overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
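[Editorial aside: the all-minus block above deletes the `cudnn_convolution.out` declaration wholesale — upstream dropped the `out=` variant of `aten::cudnn_convolution` — so bindings generated from this YAML lose that wrapper while the functional form survives (and `cudnn_convolution_transpose.out` here merely picks up the `SymInt[]` schema rewrite). For orientation, a guarded sketch of the surviving entry point, argument order taken from the deleted block; it needs a CUDA/cuDNN build at runtime:

#include <ATen/ATen.h>

int main() {
  if (!at::hasCUDA()) return 0;  // cudnn_convolution is a CUDA-only op
  at::Tensor x = at::randn({1, 3, 8, 8}, at::kCUDA);
  at::Tensor w = at::randn({4, 3, 3, 3}, at::kCUDA);
  at::Tensor y = at::cudnn_convolution(x, w, /*padding=*/{0, 0},
                                       /*stride=*/{1, 1}, /*dilation=*/{1, 1},
                                       /*groups=*/1, /*benchmark=*/false,
                                       /*deterministic=*/false,
                                       /*allow_tf32=*/true);
}
]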
@@ -162914,7 +172659,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -163023,7 +172768,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + schema_string: aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) arguments: - allocate: true annotation: a! @@ -163169,7 +172914,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -163192,7 +172937,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -163213,7 +172958,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -163229,7 +172974,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -163278,7 +173023,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -163306,12 +173051,12 @@
     dynamic_type: const at::Scalar &
     is_nullable: true
     name: alpha
-    type: const c10::optional<at::Scalar> &
+    type: const ::std::optional<at::Scalar> &
   - annotation: null
     dynamic_type: at::Tensor
     is_nullable: true
     name: bias
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     dynamic_type: at::IntArrayRef
     is_nullable: false
@@ -163332,7 +173077,7 @@
     is_nullable: false
     name: groups
     type: int64_t
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Scalar> &, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &)
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Scalar> &, const ::std::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -163353,12 +173098,12 @@
     dynamic_type: const at::Scalar &
     is_nullable: true
     name: alpha
-    type: const c10::optional<at::Scalar> &
+    type: const ::std::optional<at::Scalar> &
   - annotation: null
     dynamic_type: at::Tensor
     is_nullable: true
     name: bias
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     dynamic_type: at::IntArrayRef
     is_nullable: false
@@ -164180,8 +173925,8 @@
     is_nullable: true
     kwarg_only: true
     name: rounding_mode
-    type: c10::optional<c10::string_view>
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, c10::optional<c10::string_view>, at::Tensor &)
+    type: ::std::optional<c10::string_view>
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, ::std::optional<c10::string_view>, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -164198,7 +173943,7 @@
     is_nullable: true
     kwarg_only: true
     name: rounding_mode
-    type: c10::optional<c10::string_view>
+    type: ::std::optional<c10::string_view>
   - allocate: true
     annotation: a!
     dynamic_type: at::Tensor
@@ -164623,7 +174368,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: per_sample_weights
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: false
     dynamic_type: bool
@@ -164636,7 +174381,7 @@
     is_nullable: false
     name: padding_idx
     type: int64_t
-  schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional<at::Tensor> &, bool, int64_t, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &)
+  schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const ::std::optional<at::Tensor> &, bool, int64_t, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -164676,7 +174421,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: per_sample_weights
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: false
     dynamic_type: bool
@@ -164815,7 +174560,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: per_sample_weights
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: false
     dynamic_type: bool
@@ -164828,7 +174573,7 @@
     is_nullable: false
     name: padding_idx
     type: int64_t
-  schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional<at::Tensor> &, bool, int64_t, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &)
+  schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const ::std::optional<at::Tensor> &, bool, int64_t, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -164868,7 +174613,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: per_sample_weights
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: false
     dynamic_type: bool
@@ -164992,14 +174737,14 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: per_sample_weights
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: -1
     dynamic_type: int64_t
     is_nullable: false
     name: padding_idx
     type: int64_t
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, const c10::optional<at::Tensor> &, int64_t, at::Tensor &)
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, const ::std::optional<at::Tensor> &, int64_t, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -165045,7 +174790,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: per_sample_weights
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: -1
     dynamic_type: int64_t
@@ -165210,15 +174955,15 @@
     is_nullable: true
     kwarg_only: true
     name: names
-    type: c10::optional<at::DimnameList>
+    type: ::std::optional<at::DimnameList>
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: at::MemoryFormat
     is_nullable: true
     kwarg_only: true
     name: memory_format
-    type: c10::optional<at::MemoryFormat>
-  schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional<at::DimnameList>, c10::optional<at::MemoryFormat>, at::Tensor &)
+    type: ::std::optional<at::MemoryFormat>
+  schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional<at::DimnameList>, ::std::optional<at::MemoryFormat>, at::Tensor &)
schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -165230,14 +174975,14 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -165261,12 +175006,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: new_empty_out - operator_name: new_empty +- name: empty_permuted_out + operator_name: empty_permuted overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -165276,26 +175021,26 @@ output: true type: at::Tensor & - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::IntArrayRef is_nullable: false - name: self - type: const at::Tensor & + name: size + type: at::IntArrayRef - annotation: null dynamic_type: at::IntArrayRef is_nullable: false - name: size + name: physical_layout type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::IntArrayRef is_nullable: false - name: self - type: const at::Tensor & + name: size + type: at::IntArrayRef - annotation: null dynamic_type: at::IntArrayRef is_nullable: false - name: size + name: physical_layout type: at::IntArrayRef - allocate: true annotation: a! @@ -165320,12 +175065,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: new_empty_strided_out - operator_name: new_empty_strided +- name: new_empty_out + operator_name: new_empty overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -165344,12 +175089,7 @@ is_nullable: false name: size type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: stride - type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -165361,11 +175101,6 @@ is_nullable: false name: size type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: stride - type: at::IntArrayRef - allocate: true annotation: a! dynamic_type: at::Tensor @@ -165389,12 +175124,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: new_full_out - operator_name: new_full +- name: new_empty_strided_out + operator_name: new_empty_strided overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) 
+ schema_string: aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -165414,11 +175149,11 @@ name: size type: at::IntArrayRef - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::IntArrayRef is_nullable: false - name: fill_value - type: const at::Scalar & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, const at::Scalar &, at::Tensor &) + name: stride + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -165431,10 +175166,10 @@ name: size type: at::IntArrayRef - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::IntArrayRef is_nullable: false - name: fill_value - type: const at::Scalar & + name: stride + type: at::IntArrayRef - allocate: true annotation: a! dynamic_type: at::Tensor @@ -165458,12 +175193,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: new_zeros_out - operator_name: new_zeros +- name: new_full_out + operator_name: new_full overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -165482,7 +175217,76 @@ is_nullable: false name: size type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: new_zeros_out + operator_name: new_zeros + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -165581,7 +175385,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_empty_affine_quantized.out(int[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -165610,13 +175414,13 @@ name: zero_point type: int64_t - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, double, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, double, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -165638,12 +175442,12 @@ name: zero_point type: int64_t - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -165672,7 +175476,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_empty_per_channel_affine_quantized.out(int[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -165705,13 +175509,13 @@ name: axis type: int64_t - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, const at::Tensor &, const at::Tensor &, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, const at::Tensor &, const at::Tensor &, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -165737,12 +175541,12 @@ name: axis type: int64_t - annotation: null - default: MemoryFormat::Contiguous + default: c10::MemoryFormat::Contiguous dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -165791,13 +175595,13 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, const at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -165810,12 +175614,12 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -165857,13 +175661,13 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -165876,12 +175680,12 @@ name: size type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -165903,7 +175707,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_resize_output.out(Tensor self, int[] size, Device device, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -165972,7 +175776,7 @@ overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_resize_output(Tensor self, int[] size, Device device) -> Tensor + schema_string: aten::_resize_output(Tensor self, SymInt[] size, Device device) -> Tensor arguments: - annotation: null dynamic_type: at::Tensor @@ -166047,13 +175851,13 @@ name: qtensor type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -166066,12 +175870,12 @@ name: qtensor type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -166115,13 +175919,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -166129,12 +175933,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -166335,6 +176139,65 @@ with_gil: false deprecated: false has_math_kernel: false +- name: floor_divide_out + operator_name: floor_divide + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false - name: full_out operator_name: full overload_name: names_out @@ -166364,8 +176227,8 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, const at::Scalar &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, const at::Scalar &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -166382,7 +176245,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -166431,13 +176294,13 @@ name: fill_value type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -166450,12 +176313,12 @@ name: fill_value type: const at::Scalar & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -166499,18 +176362,18 @@ name: filename type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: shared - type: c10::optional + type: ::std::optional - annotation: null default: 0 dynamic_type: int64_t is_nullable: true name: size - type: c10::optional - schema_order_cpp_signature: at::Tensor & (c10::string_view, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (c10::string_view, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: c10::string_view @@ -166518,17 +176381,17 @@ name: filename type: c10::string_view - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: bool is_nullable: true name: shared - type: c10::optional + type: ::std::optional - annotation: null default: 0 dynamic_type: int64_t is_nullable: true name: size - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -167649,12 +177512,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -167680,7 +177543,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -167691,12 +177554,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -167817,7 +177680,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -167843,7 +177706,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, int64_t, int64_t, int64_t, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -167869,7 +177732,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -167958,10 +177821,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -167973,7 +177836,7 @@ is_nullable: false name: accumulate type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::List> &, const at::Tensor &, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -167981,10 +177844,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -168039,10 +177902,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - 
type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -168060,7 +177923,7 @@ is_nullable: false name: unsafe type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::List> &, const at::Tensor &, bool, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &, bool, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -168068,10 +177931,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -168125,10 +177988,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -168146,7 +178009,7 @@ is_nullable: false name: unsafe type: bool - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List> &, const at::Tensor &, bool, bool) + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List<::std::optional> &, const at::Tensor &, bool, bool) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -168154,10 +178017,10 @@ name: self type: const at::Tensor & - annotation: null - dynamic_type: const c10::List> & + dynamic_type: const c10::List<::std::optional> & is_nullable: true name: indices - type: const c10::List> & + type: const c10::List<::std::optional> & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -168282,18 +178145,18 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, const c10::optional &, const c10::optional &, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, const ::std::optional &, const ::std::optional &, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -168309,12 +178172,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -168420,18 +178283,18 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: ::std::array is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const 
at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -168462,12 +178325,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: ::std::array is_nullable: false @@ -168658,8 +178521,8 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -168676,7 +178539,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! dynamic_type: at::Tensor @@ -170083,12 +179946,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: median_out - operator_name: median +- name: quantized_max_pool3d_out + operator_name: quantized_max_pool3d overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -170102,13 +179965,79 @@ is_nullable: false name: self type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool - allocate: true annotation: a! dynamic_type: at::Tensor @@ -170132,12 +180061,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: nanmedian_out - operator_name: nanmedian +- name: median_out + operator_name: median overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -170181,12 +180110,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _mps_convolution_out - operator_name: _mps_convolution +- name: nanmedian_out + operator_name: nanmedian overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -170200,73 +180129,122 @@ is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: weight - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: bias - type: const c10::optional & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: padding - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: stride - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: dilation - type: at::IntArrayRef - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: groups - type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: weight - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: bias - type: const c10::optional & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: padding - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: stride - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: dilation - type: at::IntArrayRef - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: groups - type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _mps_convolution_out + operator_name: _mps_convolution + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t - allocate: true annotation: a! dynamic_type: at::Tensor @@ -170295,7 +180273,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + schema_string: aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) arguments: - allocate: true annotation: a! @@ -170448,7 +180426,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -170471,7 +180449,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -170492,7 +180470,7 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -170508,7 +180486,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -170912,17 +180890,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -170973,7 +180951,7 @@ is_nullable: false name: workspace type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, int64_t, int64_t, int64_t, bool, bool, bool, at::IntArrayRef, bool, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, int64_t, int64_t, int64_t, bool, bool, bool, at::IntArrayRef, bool, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -171029,17 +181007,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -171215,17 +181193,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ 
-171241,7 +181219,7 @@ is_nullable: false name: epsilon type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -171257,17 +181235,17 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -171373,28 +181351,28 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false name: epsilon type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -171415,22 +181393,22 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -171484,7 +181462,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -171507,7 +181485,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -171538,7 +181516,7 @@ is_nullable: false name: deterministic type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -171554,7 +181532,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -171613,7 +181591,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -171636,7 +181614,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -171672,7 +181650,7 @@ is_nullable: false name: deterministic type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -171688,7 +181666,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -171752,7 +181730,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -171775,7 +181753,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -171806,7 +181784,7 @@ is_nullable: false name: deterministic type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -171822,7 +181800,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -171942,7 +181920,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -171987,8 +181965,8 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const ::std::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -172014,7 +181992,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -172059,7 +182037,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -172187,7 +182165,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -172197,17 +182175,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -172252,7 +182230,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -172263,7 +182241,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: void (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, const at::Tensor &, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &, at::TensorList) + schema_order_cpp_signature: void (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &, const at::Tensor &, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -172294,7 +182272,7 @@ dynamic_type: at::Tensor is_nullable: true name: cx - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -172304,17 +182282,17 @@ dynamic_type: at::Tensor is_nullable: true name: grad_output - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: int64_t is_nullable: false @@ -172359,7 +182337,7 @@ dynamic_type: at::Tensor is_nullable: true name: dropout_state - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -172545,12 +182523,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -172576,7 +182554,7 @@ is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, bool, double, double) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const 
at::Tensor &, bool, double, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -172587,12 +182565,12 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -172648,12 +182626,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: batch_norm_stats_out - operator_name: batch_norm_stats +- name: _native_batch_norm_legit_no_training_out + operator_name: _native_batch_norm_legit_no_training overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + schema_string: aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) arguments: - allocate: true annotation: a! @@ -172669,23 +182647,80 @@ name: out1 output: true type: at::Tensor & + - allocate: true + annotation: c! + dynamic_type: at::Tensor + is_nullable: false + name: out2 + output: true + type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false name: input type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double - annotation: null dynamic_type: double is_nullable: false name: eps type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double, double, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: input type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: running_var + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double - annotation: null dynamic_type: double is_nullable: false @@ -172705,6 +182740,13 @@ name: out1 output: true type: at::Tensor & + - allocate: true + annotation: c! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out2 + output: true + type: at::Tensor & method_of: - Type - namespace @@ -172717,6 +182759,9 @@ - dynamic_type: at::Tensor name: out1 type: at::Tensor & + - dynamic_type: at::Tensor + name: out2 + type: at::Tensor & inplace: false is_factory_method: false abstract: true @@ -172724,12 +182769,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: batch_norm_gather_stats_out - operator_name: batch_norm_gather_stats +- name: batch_norm_stats_out + operator_name: batch_norm_stats overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + schema_string: aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) arguments: - allocate: true annotation: a! @@ -172751,42 +182796,76 @@ name: input type: const at::Tensor & - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: mean - type: const at::Tensor & + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, at::Tensor &, at::Tensor &) + schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: invstd + name: input type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: running_mean - type: const c10::optional & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: running_var - type: const c10::optional & - - annotation: null - dynamic_type: double - is_nullable: false - name: momentum - type: double - annotation: null dynamic_type: double is_nullable: false name: eps type: double - - annotation: null - dynamic_type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor is_nullable: false - name: count - type: int64_t - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, double, double, int64_t, at::Tensor &, at::Tensor &) - schema_order_arguments: + name: out0 + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: out1 + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out0 + type: at::Tensor & + - dynamic_type: at::Tensor + name: out1 + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_gather_stats_out + operator_name: batch_norm_gather_stats + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out0 + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out1 + output: true + type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -172806,12 +182885,54 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: count + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, double, double, int64_t, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: invstd + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -172900,12 +183021,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -172921,7 +183042,7 @@ is_nullable: false name: counts type: const at::Tensor & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, double, double, const at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, double, double, const at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -172942,12 +183063,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -173038,27 +183159,27 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_invstd - type: 
const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -173074,7 +183195,7 @@ is_nullable: false name: output_mask type: ::std::array - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -173090,27 +183211,27 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: save_invstd - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -173228,7 +183349,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -173244,7 +183365,7 @@ is_nullable: false name: bias_g type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, bool, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, bool, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -173270,7 +183391,7 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: bool is_nullable: false @@ -173344,7 +183465,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -173377,23 +183498,23 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy + name: sum_dy type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy_xmu + name: sum_dy_xmu type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false name: count type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -173419,16 +183540,16 @@ dynamic_type: at::Tensor is_nullable: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy + name: sum_dy type: const at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: mean_dy_xmu + name: sum_dy_xmu type: const at::Tensor & - annotation: null dynamic_type: at::Tensor @@ -173488,18 +183609,18 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false name: momentum type: double - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, double, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, double, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -173510,12 +183631,12 @@ dynamic_type: at::Tensor is_nullable: true name: running_mean - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: running_var - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: double is_nullable: false @@ -173559,7 +183680,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
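Note: the hunks throughout this file mechanically replace c10::optional with ::std::optional and c10::nullopt with ::std::nullopt in the regenerated declarations; recent libtorch headers make c10::optional an alias for std::optional, so this reads as a spelling change rather than an ABI break. A minimal C++ sketch of the new spelling (the wrapper name linear_maybe_bias is hypothetical; at::linear itself takes an optional bias):

    // Sketch only: illustrates the optional-type migration recorded in this
    // diff. Assumes a libtorch where c10::optional<T> aliases std::optional<T>.
    #include <optional>
    #include <ATen/ATen.h>

    at::Tensor linear_maybe_bias(const at::Tensor& input,
                                 const at::Tensor& weight,
                                 const ::std::optional<at::Tensor>& bias) {
      // ::std::nullopt now spells the absent default that these schemas
      // previously wrote as c10::nullopt.
      return at::linear(input, weight, bias);
    }

Existing call sites that pass an at::Tensor or {} for such arguments should compile unchanged.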
@@ -173582,7 +183703,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -173596,7 +183717,7 @@ name: stride size: 2 type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -173612,7 +183733,7 @@ dynamic_type: at::Tensor is_nullable: true name: bias - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -173673,8 +183794,8 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -173686,7 +183807,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -173730,13 +183851,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -173744,12 +183865,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -173865,8 +183986,8 @@ dynamic_type: int64_t is_nullable: true name: compute_mode - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -173887,7 +184008,7 @@ dynamic_type: int64_t is_nullable: true name: compute_mode - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174263,7 +184384,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
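Note: several schema strings in this region widen int to SymInt (channel_shuffle's groups above; randint_like's high and low below), tracking upstream support for symbolic integers under dynamic shapes. The schema_order_cpp_signature entries still take int64_t, so eager C++ callers are unaffected; a hedged sketch assuming the stock at::channel_shuffle entry point:

    // Sketch: SymInt in the schema does not change eager C++ call sites;
    // groups is still passed as a concrete int64_t.
    #include <ATen/ATen.h>

    at::Tensor shuffle_example() {
      at::Tensor x = at::rand({1, 8, 4, 4});  // NCHW, C divisible by groups
      return at::channel_shuffle(x, /*groups=*/4);
    }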
@@ -174337,12 +184458,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -174350,11 +184471,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Device is_nullable: true name: device - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174451,8 +184572,8 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -174464,7 +184585,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174512,14 +184633,14 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -174531,13 +184652,13 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174581,13 +184702,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -174595,12 +184716,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174629,7 +184750,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -174649,13 +184770,13 @@ name: high type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -174668,12 +184789,12 @@ name: high type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174702,7 +184823,7 @@ overload_name: low_dtype_out manual_kernel_registration: false category_override: '' - schema_string: aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -174727,13 +184848,13 @@ name: high type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -174751,12 +184872,12 @@ name: high type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174804,8 +184925,8 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -174817,7 +184938,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -174865,14 +184986,14 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -174884,13 +185005,13 @@ is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - annotation: null dynamic_type: at::DimnameList is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -174934,13 +185055,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -174948,12 +185069,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -175041,7 +185162,7 @@ overload_name: Tensor_out manual_kernel_registration: false category_override: '' - schema_string: aten::repeat_interleave.Tensor_out(Tensor repeats, *, int? output_size=None, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -175056,13 +185177,13 @@ name: repeats type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -175070,12 +185191,12 @@ name: repeats type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: output_size - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -175477,24 +185598,24 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: start - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: end - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t is_nullable: false name: step type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, c10::optional, c10::optional, int64_t, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, ::std::optional, ::std::optional, int64_t, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -175513,17 +185634,17 @@ name: dim type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: start - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: end - type: c10::optional + type: ::std::optional - annotation: null default: 1 dynamic_type: int64_t @@ -175762,12 +185883,12 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -175790,11 +185911,11 @@ name: stride type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: storage_offset - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -175974,13 +186095,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -175988,12 +186109,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -176022,7 +186143,7 @@ overload_name: correction_out manual_kernel_registration: false category_override: '' - schema_string: aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + schema_string: aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) arguments: - allocate: true annotation: a! 
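Note: std_mean.correction_out above (its argument details follow in the next hunks) and var_mean.correction_out later in this file change correction from int? to Scalar?, i.e. from c10::optional<int64_t> to const ::std::optional<at::Scalar> &, so non-integer corrections become representable. A hedged sketch of a caller, assuming the matching at::std_mean functional overload mirrors this out= schema:

    // Sketch: correction is now a Scalar, so fractional corrections are
    // accepted; dim is left disengaged to reduce over all dimensions.
    #include <optional>
    #include <tuple>
    #include <ATen/ATen.h>

    ::std::tuple<at::Tensor, at::Tensor> std_mean_example(const at::Tensor& x) {
      return at::std_mean(x, /*dim=*/::std::nullopt,
                          /*correction=*/at::Scalar(0.5),
                          /*keepdim=*/false);
    }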
@@ -176044,19 +186165,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -176064,7 +186185,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -176072,19 +186193,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -176145,13 +186266,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -176159,12 +186280,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -176321,7 +186442,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -176807,6 +186928,55 @@ with_gil: false deprecated: false has_math_kernel: false +- name: _nested_tensor_storage_offsets_out + operator_name: _nested_tensor_storage_offsets + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_tensor_storage_offsets.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false - name: _nested_from_padded_and_nested_example_out operator_name: _nested_from_padded_and_nested_example overload_name: out @@ -176871,7 +187041,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -176896,11 +187066,11 @@ name: nested_strides type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false name: offsets - type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Tensor &) + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -176918,131 +187088,297 @@ name: nested_strides type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false name: offsets + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_view_from_jagged_copy_out + operator_name: _nested_view_from_jagged_copy + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_view_from_jagged_copy.out(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dummy + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: lengths + type: const ::std::optional & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: ragged_idx + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min_seqlen + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max_seqlen + type: const ::std::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, const ::std::optional &, const ::std::optional &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dummy + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: lengths + type: const ::std::optional & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: ragged_idx + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min_seqlen + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max_seqlen + type: const ::std::optional & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nested_get_values_copy_out + operator_name: _nested_get_values_copy + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nested_get_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _trilinear_out + operator_name: _trilinear + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i3 + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sumdim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: unroll_dim + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i3 + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand3 type: at::IntArrayRef - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false - is_factory_method: false - abstract: true - device_guard: false - with_gil: false - deprecated: false - has_math_kernel: false -- name: _trilinear_out - operator_name: _trilinear - overload_name: out - manual_kernel_registration: false - category_override: '' - schema_string: aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!) - arguments: - - allocate: true - annotation: a! 
- dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: i1 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: i2 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: i3 - type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: expand1 - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: expand2 - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: expand3 - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: sumdim - type: at::IntArrayRef - - annotation: null - default: 1 - dynamic_type: int64_t - is_nullable: false - name: unroll_dim - type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: i1 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: i2 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: i3 - type: const at::Tensor & - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: expand1 - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: expand2 - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: expand3 - type: at::IntArrayRef - - annotation: null - dynamic_type: at::IntArrayRef - is_nullable: false - name: sumdim - type: at::IntArrayRef - - annotation: null - default: 1 - dynamic_type: int64_t - is_nullable: false - name: unroll_dim - type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sumdim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: unroll_dim + type: int64_t - allocate: true annotation: a! dynamic_type: at::Tensor @@ -177331,12 +187667,12 @@ name: return_counts type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, c10::optional, at::Tensor &, at::Tensor &, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, ::std::optional, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -177356,11 +187692,11 @@ name: return_counts type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -177704,7 +188040,7 @@ overload_name: correction_out manual_kernel_registration: false category_override: '' - schema_string: aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) + schema_string: aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) arguments: - allocate: true annotation: a! @@ -177726,19 +188062,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -177746,7 +188082,7 @@ kwarg_only: true name: keepdim type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::OptionalIntArrayRef, c10::optional, bool, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -177754,19 +188090,19 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: dim size: 1 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt - dynamic_type: int64_t + default: ::std::nullopt + dynamic_type: const at::Scalar & is_nullable: true kwarg_only: true name: correction - type: c10::optional + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -178025,8 +188361,8 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional - schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::IntArrayRef @@ -178038,7 +188374,7 @@ is_nullable: true kwarg_only: true name: names - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -178067,7 +188403,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_efficientzerotensor.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -178131,13 +188467,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178145,12 +188481,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -178253,12 +188589,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178266,11 +188602,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -178383,12 +188719,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178396,11 +188732,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -178444,12 +188780,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178457,11 +188793,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -178510,12 +188846,12 @@ name: prob type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178528,11 +188864,11 @@ name: prob type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -178640,7 +188976,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -178656,8 +188992,8 @@ dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::IntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178668,7 +189004,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::IntArrayRef is_nullable: false @@ -178684,7 +189020,7 @@ dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -178708,51 +189044,85 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _sparse_sum_out - operator_name: _sparse_sum - overload_name: dim_out +- name: _batch_norm_with_update_functional + operator_name: _batch_norm_with_update_functional + overload_name: '' manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_batch_norm_with_update_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out) arguments: - - allocate: true - annotation: a! 
+ - annotation: null dynamic_type: at::Tensor is_nullable: false - name: out - output: true - type: at::Tensor & + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self + name: running_mean type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor is_nullable: false - name: dim - size: 1 - type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) + name: running_var + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double, double) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self + name: input type: const at::Tensor & - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor is_nullable: false - name: dim - size: 1 - type: at::IntArrayRef - - allocate: true - annotation: a! + name: running_mean + type: const at::Tensor & + - annotation: null dynamic_type: at::Tensor is_nullable: false - name: out - output: true - type: at::Tensor & + name: running_var + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double method_of: - Type - namespace @@ -178760,8 +189130,25 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: out - type: at::Tensor & + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + - dynamic_type: at::Tensor + field_name: running_mean_out + name: running_mean_out + type: at::Tensor + - dynamic_type: at::Tensor + field_name: running_var_out + name: running_var_out + type: at::Tensor inplace: false is_factory_method: false abstract: true @@ -178769,57 +189156,139 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _sparse_sum_backward_out - operator_name: _sparse_sum_backward +- name: _batch_norm_no_update_out + operator_name: _batch_norm_no_update overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_batch_norm_no_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) arguments: - allocate: true annotation: a! 
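_batch_norm_with_update_functional, added above, bundles the batch-norm forward with the running-statistics update and returns six tensors instead of mutating running_mean/running_var in place. A sketch of consuming that tuple, assuming a LibTorch build whose codegen exposes this private ATen entry point; the names save_mean, save_invstd, and reserve are descriptive guesses for result1..result3 in the schema:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor input = at::rand({8, 4, 5, 5});
      at::Tensor running_mean = at::zeros({4});
      at::Tensor running_var = at::ones({4});
      // weight and bias are nullable; an empty ::std::optional means "absent".
      ::std::optional<at::Tensor> weight, bias;
      auto [out, save_mean, save_invstd, reserve, mean_out, var_out] =
          at::_batch_norm_with_update_functional(input, weight, bias,
                                                 running_mean, running_var,
                                                 /*momentum=*/0.1, /*eps=*/1e-5);
      // mean_out/var_out carry the updated running statistics, matching the
      // running_mean_out/running_var_out fields declared in the schema.
      return 0;
    }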
dynamic_type: at::Tensor is_nullable: false - name: out + name: out0 + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: out1 + output: true + type: at::Tensor & + - allocate: true + annotation: c! + dynamic_type: at::Tensor + is_nullable: false + name: out2 + output: true + type: at::Tensor & + - allocate: true + annotation: d! + dynamic_type: at::Tensor + is_nullable: false + name: out3 output: true type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: grad + name: input type: const at::Tensor & - annotation: null dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const ::std::optional & + - annotation: null + dynamic_type: double is_nullable: false - name: self - type: const at::Tensor & + name: momentum + type: double - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: double is_nullable: false - name: dim - type: at::IntArrayRef - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Tensor &) + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, double, double, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: grad + name: input type: const at::Tensor & - annotation: null dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const ::std::optional & + - annotation: null + dynamic_type: double is_nullable: false - name: self - type: const at::Tensor & + name: momentum + type: double - annotation: null - dynamic_type: at::IntArrayRef + dynamic_type: double is_nullable: false - name: dim - type: at::IntArrayRef + name: eps + type: double - allocate: true annotation: a! dynamic_type: at::Tensor is_nullable: false - name: out + name: out0 + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: out1 + output: true + type: at::Tensor & + - allocate: true + annotation: c! + dynamic_type: at::Tensor + is_nullable: false + name: out2 + output: true + type: at::Tensor & + - allocate: true + annotation: d! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out3 output: true type: at::Tensor & method_of: @@ -178829,7 +189298,16 @@ python_module: '' returns: - dynamic_type: at::Tensor - name: out + name: out0 + type: at::Tensor & + - dynamic_type: at::Tensor + name: out1 + type: at::Tensor & + - dynamic_type: at::Tensor + name: out2 + type: at::Tensor & + - dynamic_type: at::Tensor + name: out3 type: at::Tensor & inplace: false is_factory_method: false @@ -178838,12 +189316,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _sparse_csr_sum_out - operator_name: _sparse_csr_sum - overload_name: dim_dtype_out +- name: _sparse_sum_out + operator_name: _sparse_sum + overload_name: dim_out manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -178863,20 +189341,7 @@ name: dim size: 1 type: at::IntArrayRef - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: keepdim - type: bool - - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178889,19 +189354,162 @@ name: dim size: 1 type: at::IntArrayRef - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - name: keepdim - type: bool - - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - kwarg_only: true - name: dtype - type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_sum_backward_out + operator_name: _sparse_sum_backward + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_csr_sum_out + operator_name: _sparse_csr_sum + overload_name: dim_dtype_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -178957,13 +189565,13 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -178983,12 +189591,12 @@ name: keepdim type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true kwarg_only: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -179338,12 +189946,12 @@ name: shape type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true name: layout - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -179361,11 +189969,11 @@ name: shape type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true name: layout - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -179412,14 +190020,14 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::ScalarType is_nullable: false kwarg_only: true name: dtype type: at::ScalarType - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::ScalarType, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const ::std::optional &, at::ScalarType, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -179430,7 +190038,7 @@ dynamic_type: const at::Scalar & is_nullable: true name: p - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::ScalarType is_nullable: false @@ -179541,13 +190149,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -179555,12 +190163,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -179609,13 +190217,13 @@ name: the_template type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, const at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, const at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -179628,12 +190236,12 @@ name: the_template type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -179675,13 +190283,13 @@ name: the_template type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -179694,12 +190302,12 @@ name: the_template type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -180339,7 +190947,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -180373,7 +190981,14 @@ is_nullable: false name: values type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (int64_t, int64_t, at::IntArrayRef, const at::Tensor &, const at::Tensor &, at::Tensor &) + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (int64_t, int64_t, at::IntArrayRef, const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: int64_t @@ -180400,6 +191015,13 @@ is_nullable: false name: values type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: is_coalesced + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -180770,12 +191392,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _to_dense_out - operator_name: _to_dense +- name: _sparse_mask_projection_out + operator_name: _sparse_mask_projection overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_to_dense.out(Tensor self, ScalarType? 
dtype=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -180789,132 +191411,215 @@ is_nullable: false name: self type: const at::Tensor & - - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) - schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self + name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt - dynamic_type: at::ScalarType - is_nullable: true - name: dtype - type: c10::optional - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false - is_factory_method: false - abstract: true - device_guard: false - with_gil: false - deprecated: false - has_math_kernel: false -- name: _coalesce_out - operator_name: _coalesce - overload_name: out - manual_kernel_registration: false - category_override: '' - schema_string: aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor + default: false + dynamic_type: bool is_nullable: false - name: self - type: const at::Tensor & - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + name: accumulate_matches + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: false name: self type: const at::Tensor & - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false - is_factory_method: false - abstract: true - device_guard: false - with_gil: false - deprecated: false - has_math_kernel: false -- name: _coalesced_out - operator_name: _coalesced - overload_name: out - manual_kernel_registration: false - category_override: '' - schema_string: aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!) - arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - annotation: null dynamic_type: at::Tensor is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - dynamic_type: bool - is_nullable: false - name: coalesced - type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, at::Tensor &) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self + name: mask type: const at::Tensor & - annotation: null + default: false dynamic_type: bool is_nullable: false - name: coalesced + name: accumulate_matches + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _to_dense_out + operator_name: _to_dense + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_dense.out(Tensor self, ScalarType? dtype=None, bool? masked_grad=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: masked_grad + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, ::std::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: ::std::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: ::std::optional + - annotation: null + default: ::std::nullopt + dynamic_type: bool + is_nullable: true + name: masked_grad + type: ::std::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _coalesce_out + operator_name: _coalesce + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _coalesced_out + operator_name: _coalesced + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
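The _to_dense.out schema above gains a nullable masked_grad flag after dtype, and the public Tensor::to_dense method mirrors the same pair of optionals. A short sketch with both arguments left disengaged to keep the historical behaviour:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor sp = at::eye(4).to_sparse();
      // dtype and masked_grad are both ::std::optional; passing
      // ::std::nullopt for each reproduces the pre-change default path.
      at::Tensor dense = sp.to_dense(/*dtype=*/::std::nullopt,
                                     /*masked_grad=*/::std::nullopt);
      return 0;
    }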
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: coalesced + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: coalesced type: bool - allocate: true annotation: a! @@ -181112,12 +191817,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: to_sparse_out - operator_name: to_sparse +- name: _to_sparse_out + operator_name: _to_sparse overload_name: sparse_dim_out manual_kernel_registration: false category_override: '' - schema_string: aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -181171,12 +191876,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: to_sparse_out - operator_name: to_sparse +- name: _to_sparse_out + operator_name: _to_sparse overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -181191,14 +191896,14 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true kwarg_only: true @@ -181206,13 +191911,13 @@ size: 2 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::OptionalIntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::OptionalIntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -181220,14 +191925,14 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Layout is_nullable: true kwarg_only: true name: layout - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true kwarg_only: true @@ -181235,12 +191940,12 @@ size: 2 type: at::OptionalIntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true kwarg_only: true name: dense_dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -181264,12 +191969,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: to_sparse_csr_out - operator_name: to_sparse_csr +- name: _to_sparse_csr_out + operator_name: _to_sparse_csr overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -181284,12 +191989,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -181297,11 +192002,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -181325,12 +192030,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: to_sparse_csc_out - operator_name: to_sparse_csc +- name: _to_sparse_csc_out + operator_name: _to_sparse_csc overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -181345,12 +192050,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -181358,11 +192063,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -181386,12 +192091,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: to_sparse_bsr_out - operator_name: to_sparse_bsr +- name: _to_sparse_bsr_out + operator_name: _to_sparse_bsr overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
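The renames above move the codegen'd conversion out-variants behind underscore-prefixed private names (_to_sparse.out, _to_sparse_csr.out, _to_sparse_csc.out, with the bsr/bsc variants continuing below), while the user-facing methods keep their spelling. A quick sketch of the public surface staying put:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor dense = at::eye(3);
      // Public conversion methods are unchanged by the rename; only the
      // structured out-variants in the registry moved to _to_sparse*.out.
      at::Tensor coo = dense.to_sparse();
      at::Tensor csr = dense.to_sparse_csr();
      return 0;
    }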
@@ -181412,12 +192117,12 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -181431,11 +192136,11 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -181459,12 +192164,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: to_sparse_bsc_out - operator_name: to_sparse_bsc +- name: _to_sparse_bsc_out + operator_name: _to_sparse_bsc overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::_to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -181485,12 +192190,12 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -181504,11 +192209,11 @@ size: 2 type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dense_dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -181552,12 +192257,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -181565,11 +192270,11 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ScalarType is_nullable: true name: dtype - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -181598,7 +192303,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! 
@@ -181640,7 +192345,7 @@ name: groups type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: input_size @@ -181680,7 +192385,7 @@ name: groups type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::IntArrayRef is_nullable: true name: input_size @@ -181713,7 +192418,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!) + schema_string: aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) arguments: - allocate: true annotation: a! @@ -181754,7 +192459,13 @@ is_nullable: false name: groups type: int64_t - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::Tensor &) + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: input_size + type: at::OptionalIntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::OptionalIntArrayRef, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -181788,6 +192499,12 @@ is_nullable: false name: groups type: int64_t + - annotation: null + default: ::std::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: input_size + type: at::OptionalIntArrayRef - allocate: true annotation: a! dynamic_type: at::Tensor @@ -183540,13 +194257,13 @@ name: non_blocking type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -183561,12 +194278,12 @@ name: non_blocking type: bool - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::MemoryFormat is_nullable: true kwarg_only: true name: memory_format - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -183809,7 +194526,7 @@ overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () + schema_string: aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () arguments: - allocate: true annotation: a! 
@@ -183834,96 +194551,96 @@ type: at::TensorList - annotation: null dynamic_type: at::Tensor - is_nullable: false + is_nullable: true name: grad_y + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_hy + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_cy + type: const ::std::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: z_state type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: cell_state_fwd + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: layersOutputs + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: hx + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: params + type: at::TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + schema_order_cpp_signature: void (const ::std::optional &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool, at::Tensor &, at::TensorList, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_y + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: z_state - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: cell_state_fwd - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: input - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: layersOutputs - type: const at::Tensor & - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: hx - type: at::TensorList - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: params - type: at::TensorList - - annotation: null - dynamic_type: bool - is_nullable: false - name: has_biases - type: bool - - annotation: null - dynamic_type: int64_t - is_nullable: false - name: num_layers - type: int64_t - - annotation: null - dynamic_type: double - is_nullable: false - name: dropout - type: double - - annotation: null - dynamic_type: bool - is_nullable: false - name: train - type: bool - - annotation: null - dynamic_type: bool - is_nullable: false - name: bidirectional - type: bool - - annotation: null - dynamic_type: 
bool - is_nullable: false - name: batch_first - type: bool - schema_order_cpp_signature: void (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool, at::Tensor &, at::TensorList, at::TensorList) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: grad_y - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: grad_hy - type: const c10::optional & - - annotation: null - dynamic_type: at::Tensor - is_nullable: true - name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -184066,14 +194783,14 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &, at::Tensor &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -184095,13 +194812,13 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -184177,12 +194894,12 @@ dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -184203,18 +194920,18 @@ is_nullable: false name: has_bias type: bool - schema_order_cpp_signature: ::std::tuple (const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_cpp_signature: ::std::tuple (const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, at::Tensor &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_hy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: true name: grad_cy - type: const c10::optional & + type: const ::std::optional & - annotation: null dynamic_type: at::Tensor is_nullable: false @@ -184319,14 +195036,14 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &, at::Tensor &) + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, at::Tensor &, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -184348,13 +195065,13 @@ dynamic_type: at::Tensor is_nullable: true name: input_bias - type: const c10::optional & + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true name: hidden_bias - type: const c10::optional & + type: const ::std::optional & - allocate: true annotation: a! dynamic_type: at::Tensor @@ -185401,18 +196118,18 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -185425,17 +196142,17 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: mask_type - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -185489,12 +196206,12 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -185512,11 +196229,11 @@ name: mask type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: int64_t is_nullable: true name: dim - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -186338,15 +197055,15 @@ dynamic_type: int64_t is_nullable: true name: to - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186362,14 +197079,14 @@ dynamic_type: int64_t is_nullable: true name: to - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -186414,15 +197131,15 @@ dynamic_type: int64_t is_nullable: true name: to - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186438,14 +197155,14 @@ dynamic_type: int64_t is_nullable: true name: to - type: c10::optional + type: ::std::optional - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -186487,13 +197204,13 @@ name: to type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186506,12 +197223,12 @@ name: to type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -186553,13 +197270,13 @@ name: to type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186572,12 +197289,12 @@ name: to type: int64_t - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -186614,13 +197331,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186628,12 +197345,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
dynamic_type: at::Tensor @@ -186670,13 +197387,13 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186684,12 +197401,12 @@ name: self type: const at::Tensor & - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -186738,13 +197455,13 @@ name: to type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186764,12 +197481,12 @@ name: to type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -186818,13 +197535,13 @@ name: to type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186844,12 +197561,12 @@ name: to type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -186898,13 +197615,13 @@ name: sigma type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -186924,12 +197641,12 @@ name: sigma type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
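The same optional-generator rewrite runs through the in-place RNG out-variants (random_, uniform_, normal_, and the distributions that follow). A sketch of driving them with an explicit seeded generator, assuming the CPU generator factory at::detail::createCPUGenerator that ATen ships:

    #include <ATen/ATen.h>
    #include <ATen/CPUGeneratorImpl.h>

    int main() {
      // The generator rides through the ::std::optional<at::Generator>
      // slot these schemas now declare; omit it to use the default RNG.
      at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/42);
      at::Tensor t = at::empty({4});
      t.uniform_(/*from=*/0.0, /*to=*/1.0, gen);
      t.normal_(/*mean=*/0.0, /*std=*/1.0, gen);
      return 0;
    }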
dynamic_type: at::Tensor @@ -186978,13 +197695,13 @@ name: sigma type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187004,12 +197721,12 @@ name: sigma type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -187058,13 +197775,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187084,12 +197801,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -187138,13 +197855,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187164,12 +197881,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -187212,13 +197929,13 @@ name: lambd type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187232,12 +197949,12 @@ name: lambd type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! 
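(Editorial sketch, not part of the generated declarations: the change running through these hunks is a pure respelling, c10::optional/c10::nullopt becoming ::std::optional/::std::nullopt, reflecting c10::optional turning into an alias of std::optional in recent LibTorch. At a call site the only visible difference is the namespace; at::bernoulli below is one of the generator-taking operators touched here, with the template arguments written out in full.)

    #include <ATen/ATen.h>

    // Same operator before and after this diff; only the optional's
    // spelling changes:
    //   old: at::Tensor at::bernoulli(const at::Tensor &, double,
    //                                 c10::optional<at::Generator>);
    //   new: at::Tensor at::bernoulli(const at::Tensor &, double,
    //                                 ::std::optional<at::Generator>);
    at::Tensor coin_flips(const at::Tensor &self) {
      return at::bernoulli(self, /*p=*/0.5,
                           /*generator=*/::std::nullopt);  // was c10::nullopt
    }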
dynamic_type: at::Tensor @@ -187280,13 +197997,13 @@ name: lambd type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187300,12 +198017,12 @@ name: lambd type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -187347,13 +198064,13 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187366,12 +198083,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -187413,13 +198130,13 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional) + type: ::std::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, ::std::optional) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187432,12 +198149,12 @@ name: p type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional method_of: - Type - namespace @@ -187810,19 +198527,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -187830,7 +198547,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: void (const at::Tensor &, at::IntArrayRef, c10::optional>, const c10::optional &, bool, at::TensorList) + schema_order_cpp_signature: void (const at::Tensor &, at::IntArrayRef, ::std::optional>, const ::std::optional &, bool, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187843,19 +198560,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor 
is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -187908,19 +198625,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -187928,7 +198645,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional>, const c10::optional &, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, ::std::optional>, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -187941,19 +198658,19 @@ name: bins type: at::IntArrayRef - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::ArrayRef is_nullable: true kwarg_only: true name: range - type: c10::optional> + type: ::std::optional> - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -188014,7 +198731,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -188022,7 +198739,7 @@ kwarg_only: true name: density type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::TensorList, const c10::optional &, bool, at::Tensor &) + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::TensorList, const ::std::optional &, bool, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -188040,7 +198757,7 @@ is_nullable: true kwarg_only: true name: weight - type: const c10::optional & + type: const ::std::optional & - annotation: null default: false dynamic_type: bool @@ -188130,95 +198847,6 @@ with_gil: false deprecated: false has_math_kernel: false -- name: argsort_out - operator_name: argsort - overload_name: stable_out - manual_kernel_registration: false - category_override: '' - schema_string: aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!) - arguments: - - allocate: true - annotation: a! 
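(Sketch, not from the diff itself: the histogram declarations above apply the same migration to composite optionals, the bin range becoming ::std::optional<at::ArrayRef<double>> and the weights a const ::std::optional<at::Tensor> &. An illustrative call, with arbitrary values:)

    #include <ATen/ATen.h>
    #include <array>

    // Optional bin range and optional weights under the new spelling:
    ::std::tuple<at::Tensor, at::Tensor> hist01(const at::Tensor &x) {
      std::array<double, 2> range{0.0, 1.0};
      return at::histogram(x, /*bins=*/64,
                           /*range=*/at::ArrayRef<double>(range),
                           /*weight=*/::std::nullopt,
                           /*density=*/true);
    }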
- dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: stable - type: bool - - annotation: null - default: -1 - dynamic_type: int64_t - is_nullable: false - kwarg_only: true - name: dim - type: int64_t - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: descending - type: bool - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, int64_t, bool, at::Tensor &) - schema_order_arguments: - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: self - type: const at::Tensor & - - annotation: null - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: stable - type: bool - - annotation: null - default: -1 - dynamic_type: int64_t - is_nullable: false - kwarg_only: true - name: dim - type: int64_t - - annotation: null - default: false - dynamic_type: bool - is_nullable: false - kwarg_only: true - name: descending - type: bool - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out - output: true - type: at::Tensor & - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & - inplace: false - is_factory_method: false - abstract: true - device_guard: false - with_gil: false - deprecated: false - has_math_kernel: false - name: unfold_backward_out operator_name: unfold_backward overload_name: out @@ -188340,13 +198968,13 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional - schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, c10::optional, at::Tensor &) + type: ::std::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, double, ::std::optional, at::Tensor &) schema_order_arguments: - annotation: null dynamic_type: at::Tensor @@ -188366,12 +198994,12 @@ name: std type: double - annotation: null - default: c10::nullopt + default: ::std::nullopt dynamic_type: at::Generator is_nullable: true kwarg_only: true name: generator - type: c10::optional + type: ::std::optional - allocate: true annotation: a! dynamic_type: at::Tensor @@ -188765,12 +199393,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sub_out - operator_name: _foreach_sub - overload_name: Scalar_out +- name: _foreach_add_out + operator_name: _foreach_add + overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! 
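(The hunk above also drops aten::argsort.stable_out from the generated registry. For code that relied on the out variant, a hedged fallback via the still-present functional form; argsort_stable_out is a hypothetical helper, not something this package defines:)

    #include <ATen/ATen.h>

    // Emulate the removed out variant with an explicit copy into the
    // caller's buffer (out must be an int64 tensor of the right shape):
    at::Tensor &argsort_stable_out(const at::Tensor &self, bool stable,
                                   int64_t dim, bool descending,
                                   at::Tensor &out) {
      out.copy_(at::argsort(self, stable, dim, descending));
      return out;
    }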
@@ -188785,11 +199413,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) + schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -188797,9 +199432,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - allocate: true annotation: a! @@ -188821,12 +199463,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_mul_out - operator_name: _foreach_mul - overload_name: Scalar_out +- name: _foreach_add_out + operator_name: _foreach_add + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -188841,11 +199483,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -188853,10 +199495,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & + name: scalars + type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -188877,12 +199519,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_div_out - operator_name: _foreach_div - overload_name: Scalar_out +- name: _foreach_add_out + operator_name: _foreach_add + overload_name: Tensor_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! 
@@ -188897,11 +199539,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) + schema_order_cpp_signature: void (at::TensorList, const at::Tensor &, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -188909,9 +199558,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - allocate: true annotation: a! @@ -188933,12 +199589,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_min_out - operator_name: _foreach_clamp_min +- name: _foreach_sub_out + operator_name: _foreach_sub overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -188989,12 +199645,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max_out - operator_name: _foreach_clamp_max - overload_name: Scalar_out +- name: _foreach_sub_out + operator_name: _foreach_sub + overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189009,11 +199665,18 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) + schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189021,9 +199684,16 @@ name: self type: at::TensorList - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 dynamic_type: const at::Scalar & is_nullable: false - name: scalar + kwarg_only: true + name: alpha type: const at::Scalar & - allocate: true annotation: a! 
@@ -189045,12 +199715,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_maximum_out - operator_name: _foreach_maximum - overload_name: Scalar_out +- name: _foreach_sub_out + operator_name: _foreach_sub + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189065,11 +199735,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189077,10 +199747,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - name: scalar - type: const at::Scalar & + name: scalars + type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189101,12 +199771,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum_out - operator_name: _foreach_minimum +- name: _foreach_mul_out + operator_name: _foreach_mul overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189157,12 +199827,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_add_out - operator_name: _foreach_add +- name: _foreach_mul_out + operator_name: _foreach_mul overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189181,14 +199851,7 @@ is_nullable: false name: other type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & - is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189200,13 +199863,6 @@ is_nullable: false name: other type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & - is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & - allocate: true annotation: a! 
dynamic_type: at::TensorList @@ -189227,12 +199883,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sub_out - operator_name: _foreach_sub - overload_name: List_out +- name: _foreach_mul_out + operator_name: _foreach_mul + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189247,18 +199903,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: other - type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & - schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189266,17 +199915,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: other - type: at::TensorList - - annotation: null - default: 1 - dynamic_type: const at::Scalar & + dynamic_type: at::ArrayRef is_nullable: false - kwarg_only: true - name: alpha - type: const at::Scalar & + name: scalars + type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189299,10 +199941,10 @@ has_math_kernel: false - name: _foreach_mul_out operator_name: _foreach_mul - overload_name: List_out + overload_name: Tensor_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_mul.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189317,11 +199959,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, const at::Tensor &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189329,10 +199971,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList + type: const at::Tensor & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189355,10 +199997,10 @@ has_math_kernel: false - name: _foreach_div_out operator_name: _foreach_div - overload_name: List_out + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! 
@@ -189373,11 +200015,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189385,10 +200027,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList + name: scalar + type: const at::Scalar & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189409,12 +200051,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_min_out - operator_name: _foreach_clamp_min +- name: _foreach_div_out + operator_name: _foreach_div overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189465,12 +200107,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max_out - operator_name: _foreach_clamp_max - overload_name: List_out +- name: _foreach_div_out + operator_name: _foreach_div + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189485,11 +200127,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::ArrayRef is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189497,10 +200139,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::ArrayRef is_nullable: false - name: other - type: at::TensorList + name: scalars + type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189521,12 +200163,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_maximum_out - operator_name: _foreach_maximum - overload_name: List_out +- name: _foreach_div_out + operator_name: _foreach_div + overload_name: Tensor_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! 
@@ -189541,11 +200183,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, const at::Tensor &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189553,10 +200195,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: at::Tensor is_nullable: false name: other - type: at::TensorList + type: const at::Tensor & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189577,12 +200219,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum_out - operator_name: _foreach_minimum - overload_name: List_out +- name: _foreach_clamp_max_out + operator_name: _foreach_clamp_max + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189597,11 +200239,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189609,10 +200251,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::TensorList + dynamic_type: const at::Scalar & is_nullable: false - name: other - type: at::TensorList + name: scalar + type: const at::Scalar & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189633,12 +200275,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_add_out - operator_name: _foreach_add - overload_name: ScalarList_out +- name: _foreach_clamp_max_out + operator_name: _foreach_clamp_max + overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189653,78 +200295,22 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef - is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) - schema_order_arguments: - - annotation: null - dynamic_type: at::TensorList - is_nullable: false - name: self - type: at::TensorList - - annotation: null - dynamic_type: at::ArrayRef - is_nullable: false - name: scalars - type: at::ArrayRef - - allocate: true - annotation: a! 
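(Sketch of what the new Tensor overloads in this stretch mean: _foreach_add, _foreach_mul and _foreach_div gain a form whose `other` is one tensor applied across the whole list. Illustrative in-place use, assuming the matching _foreach_mul_.Tensor variant; names are arbitrary:)

    #include <ATen/ATen.h>
    #include <vector>

    // Scale every tensor in the list by a single scalar-holding tensor,
    // e.g. a gradient-clipping coefficient computed on device:
    void scale_all(std::vector<at::Tensor> &grads, const at::Tensor &coef) {
      at::_foreach_mul_(grads, coef);
    }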
dynamic_type: at::TensorList is_nullable: false - name: out - output: true - type: at::TensorList - method_of: - - Type - - namespace - mode: native - python_module: '' - returns: [] - inplace: false - is_factory_method: false - abstract: true - device_guard: false - with_gil: false - deprecated: false - has_math_kernel: false -- name: _foreach_sub_out - operator_name: _foreach_sub - overload_name: ScalarList_out - manual_kernel_registration: false - category_override: '' - schema_string: aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () - arguments: - - allocate: true - annotation: a! - dynamic_type: at::TensorList - is_nullable: false - name: out - output: true + name: other type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) + schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - - annotation: null - dynamic_type: at::ArrayRef - is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) - schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false - name: self + name: other type: at::TensorList - - annotation: null - dynamic_type: at::ArrayRef - is_nullable: false - name: scalars - type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189745,12 +200331,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_div_out - operator_name: _foreach_div +- name: _foreach_clamp_max_out + operator_name: _foreach_clamp_max overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189801,12 +200387,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_mul_out - operator_name: _foreach_mul - overload_name: ScalarList_out +- name: _foreach_clamp_min_out + operator_name: _foreach_clamp_min + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189821,11 +200407,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189833,10 +200419,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef + name: scalar + type: const at::Scalar & - allocate: true annotation: a! 
dynamic_type: at::TensorList @@ -189859,10 +200445,10 @@ has_math_kernel: false - name: _foreach_clamp_min_out operator_name: _foreach_clamp_min - overload_name: ScalarList_out + overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189877,11 +200463,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -189889,10 +200475,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList - allocate: true annotation: a! dynamic_type: at::TensorList @@ -189913,12 +200499,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_clamp_max_out - operator_name: _foreach_clamp_max +- name: _foreach_clamp_min_out + operator_name: _foreach_clamp_min overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189971,10 +200557,10 @@ has_math_kernel: false - name: _foreach_maximum_out operator_name: _foreach_maximum - overload_name: ScalarList_out + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -189989,11 +200575,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -190001,10 +200587,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: const at::Scalar & is_nullable: false - name: scalars - type: at::ArrayRef + name: scalar + type: const at::Scalar & - allocate: true annotation: a! 
dynamic_type: at::TensorList @@ -190025,12 +200611,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_minimum_out - operator_name: _foreach_minimum - overload_name: ScalarList_out +- name: _foreach_maximum_out + operator_name: _foreach_maximum + overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190045,11 +200631,11 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef - schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList @@ -190057,10 +200643,10 @@ name: self type: at::TensorList - annotation: null - dynamic_type: at::ArrayRef + dynamic_type: at::TensorList is_nullable: false - name: scalars - type: at::ArrayRef + name: other + type: at::TensorList - allocate: true annotation: a! dynamic_type: at::TensorList @@ -190081,12 +200667,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_exp_out - operator_name: _foreach_exp - overload_name: out +- name: _foreach_maximum_out + operator_name: _foreach_maximum + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190100,13 +200686,23 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -190127,12 +200723,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_zero_out - operator_name: _foreach_zero - overload_name: out +- name: _foreach_minimum_out + operator_name: _foreach_minimum + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! 
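(The reordered block enumerates, for each binary _foreach op, its out variants in a consistent Scalar / List / ScalarList order, ScalarList meaning one scalar per list element. A sketch against the in-place ScalarList variant; names are arbitrary:)

    #include <ATen/ATen.h>
    #include <vector>

    // Clamp each tensor in the list from below by its own scalar:
    void floor_each(std::vector<at::Tensor> &ts,
                    const std::vector<at::Scalar> &mins) {
      at::_foreach_clamp_min_(ts, mins);
    }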
@@ -190146,13 +200742,23 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -190173,35 +200779,55 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_zero - operator_name: _foreach_zero - overload_name: '' +- name: _foreach_minimum_out + operator_name: _foreach_minimum + overload_name: List_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out + schema_string: aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () arguments: + - allocate: true + annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: ::std::vector (at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - allocate: true + annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList method_of: - Type - namespace mode: native python_module: '' - returns: - - dynamic_type: at::TensorList - field_name: self_out - name: self_out - type: ::std::vector + returns: [] inplace: false is_factory_method: false abstract: true @@ -190209,12 +200835,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_sqrt_out - operator_name: _foreach_sqrt - overload_name: out +- name: _foreach_minimum_out + operator_name: _foreach_minimum + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190228,13 +200854,23 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef - allocate: true annotation: a! 
dynamic_type: at::TensorList @@ -190255,12 +200891,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_abs_out - operator_name: _foreach_abs - overload_name: out +- name: _foreach_addcdiv_out + operator_name: _foreach_addcdiv + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190274,13 +200910,45 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -190301,12 +200969,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_acos_out - operator_name: _foreach_acos - overload_name: out +- name: _foreach_addcdiv_out + operator_name: _foreach_addcdiv + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190320,13 +200988,43 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef - allocate: true annotation: a! 
dynamic_type: at::TensorList @@ -190347,12 +201045,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_asin_out - operator_name: _foreach_asin - overload_name: out +- name: _foreach_addcdiv_out + operator_name: _foreach_addcdiv + overload_name: Tensor_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190366,13 +201064,43 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & - allocate: true annotation: a! dynamic_type: at::TensorList @@ -190393,12 +201121,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_atan_out - operator_name: _foreach_atan - overload_name: out +- name: _foreach_addcmul_out + operator_name: _foreach_addcmul + overload_name: Scalar_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190412,13 +201140,45 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & - allocate: true annotation: a! 
dynamic_type: at::TensorList @@ -190439,12 +201199,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_ceil_out - operator_name: _foreach_ceil - overload_name: out +- name: _foreach_addcmul_out + operator_name: _foreach_addcmul + overload_name: ScalarList_out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190458,13 +201218,43 @@ is_nullable: false name: self type: at::TensorList - schema_order_cpp_signature: void (at::TensorList, at::TensorList) + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef, at::TensorList) schema_order_arguments: - annotation: null dynamic_type: at::TensorList is_nullable: false name: self type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef - allocate: true annotation: a! dynamic_type: at::TensorList @@ -190485,12 +201275,88 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_cos_out - operator_name: _foreach_cos +- name: _foreach_addcmul_out + operator_name: _foreach_addcmul + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scalars + type: const at::Tensor & + - allocate: true + annotation: a! 
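(New in the stretch above: _foreach_addcdiv and _foreach_addcmul gain ScalarList_out and Tensor_out variants, the Tensor form taking the per-tensor scalars packed into a single 1-D tensor. A sketch of the in-place counterpart with made-up step sizes; the exact dtype and device constraints on the scalars tensor are assumptions here:)

    #include <ATen/ATen.h>
    #include <vector>

    // params[i] += step_sizes[i] * grads[i] * avgs[i]
    void fused_update(std::vector<at::Tensor> &params,
                      at::TensorList grads, at::TensorList avgs) {
      at::Tensor step_sizes =
          at::full({static_cast<int64_t>(params.size())}, 1e-3);
      at::_foreach_addcmul_(params, grads, avgs, step_sizes);
    }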
+ dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_abs_out + operator_name: _foreach_abs overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190531,12 +201397,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_cosh_out - operator_name: _foreach_cosh +- name: _foreach_acos_out + operator_name: _foreach_acos overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190577,12 +201443,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_erf_out - operator_name: _foreach_erf +- name: _foreach_asin_out + operator_name: _foreach_asin overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190623,12 +201489,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_erfc_out - operator_name: _foreach_erfc +- name: _foreach_atan_out + operator_name: _foreach_atan overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190669,12 +201535,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_expm1_out - operator_name: _foreach_expm1 +- name: _foreach_ceil_out + operator_name: _foreach_ceil overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! @@ -190715,12 +201581,12 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _foreach_floor_out - operator_name: _foreach_floor +- name: _foreach_cos_out + operator_name: _foreach_cos overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> () + schema_string: aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! 
@@ -190761,12 +201627,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_log_out
-  operator_name: _foreach_log
+- name: _foreach_cosh_out
+  operator_name: _foreach_cosh
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -190807,12 +201673,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_log10_out
-  operator_name: _foreach_log10
+- name: _foreach_erf_out
+  operator_name: _foreach_erf
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -190853,12 +201719,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_log1p_out
-  operator_name: _foreach_log1p
+- name: _foreach_erfc_out
+  operator_name: _foreach_erfc
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -190899,12 +201765,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_log2_out
-  operator_name: _foreach_log2
+- name: _foreach_exp_out
+  operator_name: _foreach_exp
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -190945,12 +201811,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_neg_out
-  operator_name: _foreach_neg
+- name: _foreach_expm1_out
+  operator_name: _foreach_expm1
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -190991,12 +201857,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_tan_out
-  operator_name: _foreach_tan
+- name: _foreach_floor_out
+  operator_name: _foreach_floor
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191037,12 +201903,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_tanh_out
-  operator_name: _foreach_tanh
+- name: _foreach_frac_out
+  operator_name: _foreach_frac
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191083,12 +201949,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_sin_out
-  operator_name: _foreach_sin
-  overload_name: out
+- name: _foreach_lerp_out
+  operator_name: _foreach_lerp
+  overload_name: List_out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191102,13 +201968,33 @@
     is_nullable: false
     name: self
     type: at::TensorList
-  schema_order_cpp_signature: void (at::TensorList, at::TensorList)
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: tensors1
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: weights
+    type: at::TensorList
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
     name: self
     type: at::TensorList
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: tensors1
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: weights
+    type: at::TensorList
   - allocate: true
     annotation: a!
     dynamic_type: at::TensorList
@@ -191129,12 +202015,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_sinh_out
-  operator_name: _foreach_sinh
-  overload_name: out
+- name: _foreach_lerp_out
+  operator_name: _foreach_lerp
+  overload_name: Scalar_out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191148,13 +202034,33 @@
     is_nullable: false
     name: self
     type: at::TensorList
-  schema_order_cpp_signature: void (at::TensorList, at::TensorList)
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: tensors1
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: weight
+    type: const at::Scalar &
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
     name: self
     type: at::TensorList
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: tensors1
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: weight
+    type: const at::Scalar &
   - allocate: true
     annotation: a!
     dynamic_type: at::TensorList
@@ -191175,12 +202081,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_round_out
-  operator_name: _foreach_round
+- name: _foreach_lgamma_out
+  operator_name: _foreach_lgamma
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191221,12 +202127,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_lgamma_out
-  operator_name: _foreach_lgamma
+- name: _foreach_log_out
+  operator_name: _foreach_log
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191267,12 +202173,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_frac_out
-  operator_name: _foreach_frac
+- name: _foreach_log10_out
+  operator_name: _foreach_log10
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191313,12 +202219,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_reciprocal_out
-  operator_name: _foreach_reciprocal
+- name: _foreach_log1p_out
+  operator_name: _foreach_log1p
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191359,12 +202265,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_sigmoid_out
-  operator_name: _foreach_sigmoid
+- name: _foreach_log2_out
+  operator_name: _foreach_log2
  overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191405,12 +202311,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_trunc_out
-  operator_name: _foreach_trunc
+- name: _foreach_max_out
+  operator_name: _foreach_max
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_max.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191451,12 +202357,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_addcdiv_out
-  operator_name: _foreach_addcdiv
-  overload_name: Scalar_out
+- name: _foreach_neg_out
+  operator_name: _foreach_neg
+  overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191470,45 +202376,83 @@
     is_nullable: false
     name: self
     type: at::TensorList
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList)
+  schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor1
+    name: self
     type: at::TensorList
-  - annotation: null
+  - allocate: true
+    annotation: a!
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor2
+    name: out
+    output: true
     type: at::TensorList
-  - annotation: null
-    default: 1
-    dynamic_type: const at::Scalar &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns: []
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _foreach_norm_out
+  operator_name: _foreach_norm
+  overload_name: Scalar_out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, ScalarType? dtype=None, *, Tensor(a!)[] out) -> ()
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::TensorList
     is_nullable: false
-    name: value
-    type: const at::Scalar &
-  schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &, at::TensorList)
-  schema_order_arguments:
+    name: out
+    output: true
+    type: at::TensorList
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
     name: self
     type: at::TensorList
   - annotation: null
-    dynamic_type: at::TensorList
+    default: 2
+    dynamic_type: const at::Scalar &
    is_nullable: false
-    name: tensor1
-    type: at::TensorList
+    name: ord
+    type: const at::Scalar &
+  - annotation: null
+    default: ::std::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    name: dtype
+    type: ::std::optional<at::ScalarType>
+  schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, ::std::optional<at::ScalarType>, at::TensorList)
+  schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor2
+    name: self
     type: at::TensorList
   - annotation: null
-    default: 1
+    default: 2
     dynamic_type: const at::Scalar &
     is_nullable: false
-    name: value
+    name: ord
     type: const at::Scalar &
+  - annotation: null
+    default: ::std::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    name: dtype
+    type: ::std::optional<at::ScalarType>
   - allocate: true
     annotation: a!
     dynamic_type: at::TensorList
@@ -191529,12 +202473,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_addcmul_out
-  operator_name: _foreach_addcmul
-  overload_name: Scalar_out
+- name: _foreach_pow_out
+  operator_name: _foreach_pow
+  overload_name: List_out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191551,41 +202495,75 @@
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor1
+    name: exponent
     type: at::TensorList
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList)
+  schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor2
+    name: self
     type: at::TensorList
   - annotation: null
-    default: 1
-    dynamic_type: const at::Scalar &
+    dynamic_type: at::TensorList
     is_nullable: false
-    name: value
-    type: const at::Scalar &
-  schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &, at::TensorList)
-  schema_order_arguments:
-  - annotation: null
+    name: exponent
+    type: at::TensorList
+  - allocate: true
+    annotation: a!
     dynamic_type: at::TensorList
     is_nullable: false
-    name: self
+    name: out
+    output: true
+    type: at::TensorList
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns: []
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _foreach_pow_out
+  operator_name: _foreach_pow
+  overload_name: Scalar_out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> ()
+  arguments:
+  - allocate: true
+  annotation: a!
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: out
+    output: true
     type: at::TensorList
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor1
+    name: self
     type: at::TensorList
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: exponent
+    type: const at::Scalar &
+  schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList)
+  schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor2
+    name: self
     type: at::TensorList
   - annotation: null
-    default: 1
     dynamic_type: const at::Scalar &
     is_nullable: false
-    name: value
+    name: exponent
     type: const at::Scalar &
   - allocate: true
     annotation: a!
@@ -191607,12 +202585,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_addcdiv_out
-  operator_name: _foreach_addcdiv
+- name: _foreach_pow_out
+  operator_name: _foreach_pow
   overload_name: ScalarList_out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
    annotation: a!
@@ -191626,42 +202604,22 @@
     is_nullable: false
     name: self
     type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: tensor1
-    type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: tensor2
-    type: at::TensorList
   - annotation: null
     dynamic_type: at::ArrayRef<at::Scalar>
     is_nullable: false
-    name: scalars
+    name: exponent
     type: at::ArrayRef<at::Scalar>
-  schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef<at::Scalar>, at::TensorList)
+  schema_order_cpp_signature: void (at::TensorList, at::ArrayRef<at::Scalar>, at::TensorList)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
     name: self
     type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: tensor1
-    type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: tensor2
-    type: at::TensorList
   - annotation: null
     dynamic_type: at::ArrayRef<at::Scalar>
     is_nullable: false
-    name: scalars
+    name: exponent
     type: at::ArrayRef<at::Scalar>
   - allocate: true
     annotation: a!
@@ -191683,12 +202641,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_addcdiv_out
-  operator_name: _foreach_addcdiv
-  overload_name: Tensor_out
+- name: _foreach_reciprocal_out
+  operator_name: _foreach_reciprocal
+  overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191702,43 +202660,59 @@
     is_nullable: false
     name: self
     type: at::TensorList
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList)
+  schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor1
+    name: self
     type: at::TensorList
-  - annotation: null
+  - allocate: true
+    annotation: a!
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor2
+    name: out
+    output: true
     type: at::TensorList
-  - annotation: null
-    dynamic_type: at::Tensor
-    is_nullable: false
-    name: scalars
-    type: const at::Tensor &
-  schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, at::TensorList)
-  schema_order_arguments:
-  - annotation: null
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns: []
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _foreach_round_out
+  operator_name: _foreach_round
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  arguments:
+  - allocate: true
+    annotation: a!
    dynamic_type: at::TensorList
     is_nullable: false
-    name: self
+    name: out
+    output: true
     type: at::TensorList
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor1
+    name: self
     type: at::TensorList
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList)
+  schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor2
+    name: self
     type: at::TensorList
-  - annotation: null
-    dynamic_type: at::Tensor
-    is_nullable: false
-    name: scalars
-    type: const at::Tensor &
   - allocate: true
     annotation: a!
     dynamic_type: at::TensorList
@@ -191759,12 +202733,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_addcmul_out
-  operator_name: _foreach_addcmul
-  overload_name: ScalarList_out
+- name: _foreach_sigmoid_out
+  operator_name: _foreach_sigmoid
+  overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191778,43 +202752,59 @@
     is_nullable: false
     name: self
     type: at::TensorList
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList)
+  schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor1
+    name: self
     type: at::TensorList
-  - annotation: null
+  - allocate: true
+    annotation: a!
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor2
+    name: out
+    output: true
     type: at::TensorList
-  - annotation: null
-    dynamic_type: at::ArrayRef<at::Scalar>
-    is_nullable: false
-    name: scalars
-    type: at::ArrayRef<at::Scalar>
-  schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef<at::Scalar>, at::TensorList)
-  schema_order_arguments:
-  - annotation: null
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns: []
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _foreach_sign_out
+  operator_name: _foreach_sign
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_foreach_sign.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  arguments:
+  - allocate: true
+    annotation: a!
    dynamic_type: at::TensorList
     is_nullable: false
-    name: self
+    name: out
+    output: true
     type: at::TensorList
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor1
+    name: self
     type: at::TensorList
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList)
+  schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor2
+    name: self
     type: at::TensorList
-  - annotation: null
-    dynamic_type: at::ArrayRef<at::Scalar>
-    is_nullable: false
-    name: scalars
-    type: at::ArrayRef<at::Scalar>
   - allocate: true
     annotation: a!
     dynamic_type: at::TensorList
@@ -191835,12 +202825,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_addcmul_out
-  operator_name: _foreach_addcmul
-  overload_name: Tensor_out
+- name: _foreach_sin_out
+  operator_name: _foreach_sin
+  overload_name: out
   manual_kernel_registration: false
  category_override: ''
-  schema_string: aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191854,43 +202844,59 @@
     is_nullable: false
     name: self
     type: at::TensorList
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList)
+  schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor1
+    name: self
     type: at::TensorList
-  - annotation: null
+  - allocate: true
+    annotation: a!
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor2
+    name: out
+    output: true
     type: at::TensorList
-  - annotation: null
-    dynamic_type: at::Tensor
-    is_nullable: false
-    name: scalars
-    type: const at::Tensor &
-  schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, at::TensorList)
-  schema_order_arguments:
-  - annotation: null
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns: []
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _foreach_sinh_out
+  operator_name: _foreach_sinh
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  arguments:
+  - allocate: true
+    annotation: a!
     dynamic_type: at::TensorList
     is_nullable: false
-    name: self
+    name: out
+    output: true
     type: at::TensorList
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor1
+    name: self
     type: at::TensorList
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList)
+  schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensor2
+    name: self
     type: at::TensorList
-  - annotation: null
-    dynamic_type: at::Tensor
-    is_nullable: false
-    name: scalars
-    type: const at::Tensor &
   - allocate: true
     annotation: a!
     dynamic_type: at::TensorList
@@ -191911,12 +202917,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_norm_out
-  operator_name: _foreach_norm
-  overload_name: Scalar_out
+- name: _foreach_sqrt_out
+  operator_name: _foreach_sqrt
+  overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191930,25 +202936,13 @@
     is_nullable: false
     name: self
     type: at::TensorList
-  - annotation: null
-    default: 2
-    dynamic_type: const at::Scalar &
-    is_nullable: false
-    name: ord
-    type: const at::Scalar &
-  schema_order_cpp_signature: void (at::TensorList, const at::Scalar &, at::TensorList)
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
     name: self
     type: at::TensorList
-  - annotation: null
-    default: 2
-    dynamic_type: const at::Scalar &
-    is_nullable: false
-    name: ord
-    type: const at::Scalar &
   - allocate: true
     annotation: a!
     dynamic_type: at::TensorList
@@ -191969,12 +202963,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_lerp_out
-  operator_name: _foreach_lerp
-  overload_name: List_out
+- name: _foreach_tan_out
+  operator_name: _foreach_tan
+  overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -191988,32 +202982,58 @@
     is_nullable: false
     name: self
     type: at::TensorList
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList)
+  schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensors1
+    name: self
     type: at::TensorList
-  - annotation: null
+  - allocate: true
+    annotation: a!
     dynamic_type: at::TensorList
     is_nullable: false
-    name: weights
+    name: out
+    output: true
     type: at::TensorList
-  schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList)
-  schema_order_arguments:
-  - annotation: null
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns: []
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _foreach_tanh_out
+  operator_name: _foreach_tanh
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  arguments:
+  - allocate: true
+    annotation: a!
     dynamic_type: at::TensorList
     is_nullable: false
-    name: self
+    name: out
+    output: true
     type: at::TensorList
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensors1
+    name: self
     type: at::TensorList
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList)
+  schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: weights
+    name: self
     type: at::TensorList
   - allocate: true
     annotation: a!
@@ -192035,12 +203055,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foreach_lerp_out
-  operator_name: _foreach_lerp
-  overload_name: Scalar_out
+- name: _foreach_trunc_out
+  operator_name: _foreach_trunc
+  overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
+  schema_string: aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -192054,33 +203074,59 @@
     is_nullable: false
     name: self
     type: at::TensorList
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList)
+  schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensors1
+    name: self
     type: at::TensorList
-  - annotation: null
-    dynamic_type: const at::Scalar &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::TensorList
     is_nullable: false
-    name: weight
-    type: const at::Scalar &
-  schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList)
-  schema_order_arguments:
-  - annotation: null
+    name: out
+    output: true
+    type: at::TensorList
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns: []
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _foreach_zero_out
+  operator_name: _foreach_zero
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+  arguments:
+  - allocate: true
+    annotation: a!
    dynamic_type: at::TensorList
     is_nullable: false
-    name: self
+    name: out
+    output: true
    type: at::TensorList
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: tensors1
+    name: self
     type: at::TensorList
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList)
+  schema_order_arguments:
   - annotation: null
-    dynamic_type: const at::Scalar &
+    dynamic_type: at::TensorList
     is_nullable: false
-    name: weight
-    type: const at::Scalar &
+    name: self
+    type: at::TensorList
   - allocate: true
     annotation: a!
     dynamic_type: at::TensorList
@@ -192101,86 +203147,103 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: bucketize_out
-  operator_name: bucketize
-  overload_name: Scalar_out
+- name: _foreach_zero
+  operator_name: _foreach_zero
+  overload_name: ''
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
+  schema_string: aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out
+  arguments:
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: self
+    type: at::TensorList
+  schema_order_cpp_signature: ::std::vector<at::Tensor> (at::TensorList)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: self
+    type: at::TensorList
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::TensorList
+    field_name: self_out
+    name: self_out
+    type: ::std::vector<at::Tensor>
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _foreach_copy_out
+  operator_name: _foreach_copy
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_foreach_copy.out(Tensor[] self, Tensor[] src, bool non_blocking=False, *, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
-    dynamic_type: at::Tensor
+    dynamic_type: at::TensorList
     is_nullable: false
     name: out
     output: true
-    type: at::Tensor &
+    type: at::TensorList
   - annotation: null
-    dynamic_type: const at::Scalar &
+    dynamic_type: at::TensorList
     is_nullable: false
     name: self
-    type: const at::Scalar &
-  - annotation: null
-    dynamic_type: at::Tensor
-    is_nullable: false
-    name: boundaries
-    type: const at::Tensor &
+    type: at::TensorList
   - annotation: null
-    default: false
-    dynamic_type: bool
+    dynamic_type: at::TensorList
     is_nullable: false
-    kwarg_only: true
-    name: out_int32
-    type: bool
+    name: src
+    type: at::TensorList
   - annotation: null
     default: false
     dynamic_type: bool
     is_nullable: false
-    kwarg_only: true
-    name: right
+    name: non_blocking
     type: bool
-  schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, bool, bool, at::Tensor &)
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList, bool, at::TensorList)
   schema_order_arguments:
   - annotation: null
-    dynamic_type: const at::Scalar &
+    dynamic_type: at::TensorList
     is_nullable: false
     name: self
-    type: const at::Scalar &
-  - annotation: null
-    dynamic_type: at::Tensor
-    is_nullable: false
-    name: boundaries
-    type: const at::Tensor &
+    type: at::TensorList
   - annotation: null
-    default: false
-    dynamic_type: bool
+    dynamic_type: at::TensorList
     is_nullable: false
-    kwarg_only: true
-    name: out_int32
-    type: bool
+    name: src
+    type: at::TensorList
   - annotation: null
     default: false
     dynamic_type: bool
     is_nullable: false
-    kwarg_only: true
-    name: right
+    name: non_blocking
     type: bool
   - allocate: true
     annotation: a!
-    dynamic_type: at::Tensor
+    dynamic_type: at::TensorList
     is_nullable: false
     name: out
     output: true
-    type: at::Tensor &
+    type: at::TensorList
   method_of:
   - Type
   - namespace
   mode: native
   python_module: ''
-  returns:
-  - dynamic_type: at::Tensor
-    name: out
-    type: at::Tensor &
+  returns: []
   inplace: false
   is_factory_method: false
   abstract: true
@@ -192188,12 +203251,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: searchsorted_out
-  operator_name: searchsorted
+- name: bucketize_out
+  operator_name: bucketize
   overload_name: Scalar_out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+  schema_string: aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
   arguments:
   - allocate: true
     annotation: a!
@@ -192202,16 +203265,16 @@
     name: out
     output: true
     type: at::Tensor &
-  - annotation: null
-    dynamic_type: at::Tensor
-    is_nullable: false
-    name: sorted_sequence
-    type: const at::Tensor &
   - annotation: null
     dynamic_type: const at::Scalar &
     is_nullable: false
     name: self
     type: const at::Scalar &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: boundaries
+    type: const at::Tensor &
   - annotation: null
     default: false
     dynamic_type: bool
@@ -192226,32 +203289,18 @@
     kwarg_only: true
     name: right
     type: bool
-  - annotation: null
-    default: c10::nullopt
-    dynamic_type: c10::string_view
-    is_nullable: true
-    kwarg_only: true
-    name: side
-    type: c10::optional<c10::string_view>
-  - annotation: null
-    default: '{}'
-    dynamic_type: at::Tensor
-    is_nullable: true
-    kwarg_only: true
-    name: sorter
-    type: const c10::optional<at::Tensor> &
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, bool, bool, c10::optional<c10::string_view>, const c10::optional<at::Tensor> &, at::Tensor &)
+  schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, bool, bool, at::Tensor &)
   schema_order_arguments:
-  - annotation: null
-    dynamic_type: at::Tensor
-    is_nullable: false
-    name: sorted_sequence
-    type: const at::Tensor &
   - annotation: null
     dynamic_type: const at::Scalar &
     is_nullable: false
     name: self
     type: const at::Scalar &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: boundaries
+    type: const at::Tensor &
   - annotation: null
     default: false
     dynamic_type: bool
@@ -192266,20 +203315,6 @@
     kwarg_only: true
     name: right
     type: bool
-  - annotation: null
-    default: c10::nullopt
-    dynamic_type: c10::string_view
-    is_nullable: true
-    kwarg_only: true
-    name: side
-    type: c10::optional<c10::string_view>
-  - annotation: null
-    default: '{}'
-    dynamic_type: at::Tensor
-    is_nullable: true
-    kwarg_only: true
-    name: sorter
-    type: const c10::optional<at::Tensor> &
   - allocate: true
     annotation: a!
     dynamic_type: at::Tensor
@@ -192953,7 +203988,7 @@
   overload_name: output_mask_out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+  schema_string: aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
   arguments:
   - allocate: true
     annotation: a!
@@ -193102,7 +204137,7 @@
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
+  schema_string: aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
   arguments:
   - allocate: true
     annotation: a!
@@ -193131,7 +204166,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: bias
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     dynamic_type: at::IntArrayRef
     is_nullable: false
@@ -193150,7 +204185,7 @@
     name: dilation
     size: 3
     type: at::IntArrayRef
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &)
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -193172,7 +204207,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: bias
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     dynamic_type: at::IntArrayRef
     is_nullable: false
@@ -193219,7 +204254,7 @@
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
+  schema_string: aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
   arguments:
   - allocate: true
     annotation: a!
@@ -193249,7 +204284,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: bias
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: 1
     dynamic_type: at::IntArrayRef
@@ -193271,7 +204306,7 @@
     name: dilation
     size: 2
     type: at::IntArrayRef
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &)
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -193294,7 +204329,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: bias
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: 1
     dynamic_type: at::IntArrayRef
@@ -193344,7 +204379,7 @@
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
+  schema_string: aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
   arguments:
   - allocate: true
     annotation: a!
@@ -193374,7 +204409,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: bias
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: 1
     dynamic_type: at::IntArrayRef
@@ -193396,7 +204431,7 @@
     name: dilation
     size: 3
     type: at::IntArrayRef
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &)
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const ::std::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -193419,7 +204454,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: bias
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: 1
     dynamic_type: at::IntArrayRef
@@ -193705,8 +204740,8 @@
     dynamic_type: at::ArrayRef<double>
     is_nullable: true
     name: addends
-    type: c10::optional<at::ArrayRef<double>>
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional<at::ArrayRef<double>>, at::Tensor &)
+    type: ::std::optional<at::ArrayRef<double>>
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, ::std::optional<at::ArrayRef<double>>, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -193717,7 +204752,7 @@
     dynamic_type: at::ArrayRef<double>
     is_nullable: true
     name: addends
-    type: c10::optional<at::ArrayRef<double>>
+    type: ::std::optional<at::ArrayRef<double>>
   - allocate: true
     annotation: a!
     dynamic_type: at::Tensor
@@ -193918,21 +204953,21 @@
     is_nullable: true
     kwarg_only: true
     name: lengths
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: indices
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: offsets
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: 0
     dynamic_type: int64_t
@@ -193948,13 +204983,13 @@
     name: unsafe
     type: bool
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: const at::Scalar &
     is_nullable: true
     kwarg_only: true
     name: initial
-    type: const c10::optional<at::Scalar> &
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, int64_t, bool, const c10::optional<at::Scalar> &, at::Tensor &)
+    type: const ::std::optional<at::Scalar> &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, int64_t, bool, const ::std::optional<at::Scalar> &, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -193972,21 +205007,21 @@
     is_nullable: true
     kwarg_only: true
     name: lengths
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: indices
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: offsets
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: 0
     dynamic_type: int64_t
@@ -194002,12 +205037,12 @@
     name: unsafe
     type: bool
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: const at::Scalar &
     is_nullable: true
     kwarg_only: true
     name: initial
-    type: const c10::optional<at::Scalar> &
+    type: const ::std::optional<at::Scalar> &
   - allocate: true
     annotation: a!
     dynamic_type: at::Tensor
@@ -194071,14 +205106,14 @@
     is_nullable: true
     kwarg_only: true
     name: lengths
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: offsets
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: 0
     dynamic_type: int64_t
@@ -194087,13 +205122,13 @@
     name: axis
     type: int64_t
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: const at::Scalar &
     is_nullable: true
     kwarg_only: true
     name: initial
-    type: const c10::optional<at::Scalar> &
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::string_view, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, int64_t, const c10::optional<at::Scalar> &, at::Tensor &)
+    type: const ::std::optional<at::Scalar> &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::string_view, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, int64_t, const ::std::optional<at::Scalar> &, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -194121,14 +205156,14 @@
     is_nullable: true
     kwarg_only: true
     name: lengths
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: offsets
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: 0
     dynamic_type: int64_t
@@ -194137,12 +205172,12 @@
     name: axis
     type: int64_t
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: const at::Scalar &
     is_nullable: true
     kwarg_only: true
     name: initial
-    type: const c10::optional<at::Scalar> &
+    type: const ::std::optional<at::Scalar> &
   - allocate: true
     annotation: a!
     dynamic_type: at::Tensor
@@ -194186,30 +205221,30 @@
     name: list
     type: at::TensorList
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: at::ScalarType
     is_nullable: true
     name: dtype
-    type: c10::optional<at::ScalarType>
+    type: ::std::optional<at::ScalarType>
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: at::Layout
     is_nullable: true
     name: layout
-    type: c10::optional<at::Layout>
+    type: ::std::optional<at::Layout>
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: at::Device
     is_nullable: true
     name: device
-    type: c10::optional<at::Device>
+    type: ::std::optional<at::Device>
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: bool
     is_nullable: true
     name: pin_memory
-    type: c10::optional<bool>
-  schema_order_cpp_signature: at::Tensor & (at::TensorList, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>, at::Tensor &)
+    type: ::std::optional<bool>
+  schema_order_cpp_signature: at::Tensor & (at::TensorList, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
@@ -194217,29 +205252,29 @@
     name: list
     type: at::TensorList
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: at::ScalarType
     is_nullable: true
     name: dtype
-    type: c10::optional<at::ScalarType>
+    type: ::std::optional<at::ScalarType>
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: at::Layout
     is_nullable: true
     name: layout
-    type: c10::optional<at::Layout>
+    type: ::std::optional<at::Layout>
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: at::Device
     is_nullable: true
     name: device
-    type: c10::optional<at::Device>
+    type: ::std::optional<at::Device>
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: bool
     is_nullable: true
     name: pin_memory
-    type: c10::optional<bool>
+    type: ::std::optional<bool>
   - allocate: true
     annotation: a!
     dynamic_type: at::Tensor
@@ -194617,12 +205652,12 @@
     name: stride
     type: at::IntArrayRef
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: int64_t
     is_nullable: true
     name: storage_offset
-    type: c10::optional<int64_t>
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional<int64_t>, at::Tensor &)
+    type: ::std::optional<int64_t>
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, ::std::optional<int64_t>, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -194640,11 +205675,11 @@
     name: stride
     type: at::IntArrayRef
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: int64_t
     is_nullable: true
     name: storage_offset
-    type: c10::optional<int64_t>
+    type: ::std::optional<int64_t>
   - allocate: true
     annotation: a!
     dynamic_type: at::Tensor
@@ -195157,24 +206192,24 @@
     name: dim
     type: int64_t
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: int64_t
     is_nullable: true
     name: start
-    type: c10::optional<int64_t>
+    type: ::std::optional<int64_t>
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: int64_t
     is_nullable: true
     name: end
-    type: c10::optional<int64_t>
+    type: ::std::optional<int64_t>
   - annotation: null
     default: 1
     dynamic_type: int64_t
     is_nullable: false
     name: step
     type: int64_t
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional<int64_t>, c10::optional<int64_t>, int64_t, at::Tensor &)
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, ::std::optional<int64_t>, ::std::optional<int64_t>, int64_t, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -195188,17 +206223,17 @@
     name: dim
     type: int64_t
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: int64_t
     is_nullable: true
     name: start
-    type: c10::optional<int64_t>
+    type: ::std::optional<int64_t>
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: int64_t
     is_nullable: true
     name: end
-    type: c10::optional<int64_t>
+    type: ::std::optional<int64_t>
   - annotation: null
     default: 1
     dynamic_type: int64_t
@@ -196235,7 +207270,7 @@
     name: padding
     type: double
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: at::IntArrayRef
     is_nullable: true
     name: output_size
@@ -196253,7 +207288,7 @@
     name: padding
     type: double
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: at::IntArrayRef
     is_nullable: true
     name: output_size
@@ -196390,14 +207425,14 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: mask
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: int64_t
     is_nullable: true
     name: mask_type
-    type: c10::optional<int64_t>
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, double, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, c10::optional<int64_t>, at::Tensor &)
+    type: ::std::optional<int64_t>
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, double, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, ::std::optional<int64_t>, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -196494,13 +207529,13 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: mask
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: int64_t
     is_nullable: true
     name: mask_type
-    type: c10::optional<int64_t>
+    type: ::std::optional<int64_t>
   - allocate: true
     annotation: a!
     dynamic_type: at::Tensor
@@ -196595,7 +207630,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: mask
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: true
     dynamic_type: bool
@@ -196609,12 +207644,12 @@
     name: average_attn_weights
     type: bool
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: int64_t
     is_nullable: true
     name: mask_type
-    type: c10::optional<int64_t>
-  schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, bool, bool, c10::optional<int64_t>, at::Tensor &, at::Tensor &)
+    type: ::std::optional<int64_t>
+  schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, bool, bool, ::std::optional<int64_t>, at::Tensor &, at::Tensor &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::Tensor
@@ -196666,7 +207701,7 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: mask
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: true
     dynamic_type: bool
@@ -196680,11 +207715,11 @@
     name: average_attn_weights
     type: bool
   - annotation: null
-    default: c10::nullopt
+    default: ::std::nullopt
     dynamic_type: int64_t
     is_nullable: true
     name: mask_type
-    type: c10::optional<int64_t>
+    type: ::std::optional<int64_t>
   - allocate: true
     annotation: a!
     dynamic_type: at::Tensor
@@ -196863,76 +207898,1637 @@
     dynamic_type: at::Tensor
     is_nullable: true
     name: mask
-    type: const c10::optional<at::Tensor> &
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, at::Tensor &)
+    type: const ::std::optional<at::Tensor> &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: query
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: key
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: value
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: embed_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_head
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: qkv_weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: qkv_bias
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: proj_weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: proj_bias
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: mask
+    type: const ::std::optional<at::Tensor> &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _foobar_out
+  operator_name: _foobar
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: true
+    dynamic_type: bool
+    is_nullable: false
+    name: arg1
+    type: bool
+  - annotation: null
+    default: true
+    dynamic_type: bool
+    is_nullable: false
+    name: arg2
+    type: bool
+  - annotation: null
+    default: true
+    dynamic_type: bool
+    is_nullable: false
+    kwarg_only: true
+    name: arg3
+    type: bool
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, bool, bool, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: true
+    dynamic_type: bool
+    is_nullable: false
+    name: arg1
+    type: bool
+  - annotation: null
+    default: true
+    dynamic_type: bool
+    is_nullable: false
+    name: arg2
+    type: bool
+  - annotation: null
+    default: true
+    dynamic_type: bool
+    is_nullable: false
+    kwarg_only: true
+    name: arg3
+    type: bool
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _fused_adam_out
+  operator_name: _fused_adam
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: out
+    output: true
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: self
+    type: at::TensorList
+  - annotation: b!
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: grads
+    type: at::TensorList
+  - annotation: c!
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: exp_avgs
+    type: at::TensorList
+  - annotation: d!
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: exp_avg_sqs
+    type: at::TensorList
+  - annotation: e!
+ dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + - allocate: true + annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adam + operator_name: _fused_adam + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + 
is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + field_name: self_out + name: self_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: grads_out + name: grads_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avgs_out + name: exp_avgs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avg_sqs_out + name: exp_avg_sqs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: max_exp_avg_sqs_out + name: max_exp_avg_sqs_out + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adam_out + operator_name: _fused_adam + overload_name: tensor_lr_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! 
+ dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + - allocate: true + annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adam + operator_name: _fused_adam + overload_name: tensor_lr + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: 
at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + field_name: self_out + name: self_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: grads_out + name: grads_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avgs_out + name: exp_avgs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avg_sqs_out + name: exp_avg_sqs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: max_exp_avg_sqs_out + name: max_exp_avg_sqs_out + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adamw_out + operator_name: _fused_adamw + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! 
+ dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + - allocate: true + annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adamw + operator_name: _fused_adamw + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: 
max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: lr + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + field_name: self_out + name: self_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: grads_out + name: grads_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avgs_out + name: exp_avgs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avg_sqs_out + name: exp_avg_sqs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: max_exp_avg_sqs_out + name: max_exp_avg_sqs_out + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_adamw_out + operator_name: _fused_adamw + overload_name: tensor_lr_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_adamw.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + arguments: + - allocate: true + annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: out + output: true + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! 
+ dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: state_steps + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: lr + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta1 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: beta2 + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: weight_decay + type: double + - annotation: null + dynamic_type: double + is_nullable: false + kwarg_only: true + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: amsgrad + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: maximize + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList + is_nullable: false + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avgs + type: at::TensorList + - annotation: d! + dynamic_type: at::TensorList + is_nullable: false + name: exp_avg_sqs + type: at::TensorList + - annotation: e! 
+ dynamic_type: at::TensorList + is_nullable: false + name: max_exp_avg_sqs + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList is_nullable: false - name: query - type: const at::Tensor & + name: state_steps + type: at::TensorList - annotation: null dynamic_type: at::Tensor is_nullable: false - name: key + kwarg_only: true + name: lr type: const at::Tensor & - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: value - type: const at::Tensor & + kwarg_only: true + name: beta1 + type: double - annotation: null - dynamic_type: int64_t + dynamic_type: double is_nullable: false - name: embed_dim - type: int64_t + kwarg_only: true + name: beta2 + type: double - annotation: null - dynamic_type: int64_t + dynamic_type: double is_nullable: false - name: num_head - type: int64_t + kwarg_only: true + name: weight_decay + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: qkv_weight - type: const at::Tensor & + kwarg_only: true + name: eps + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: qkv_bias - type: const at::Tensor & + kwarg_only: true + name: amsgrad + type: bool - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: proj_weight - type: const at::Tensor & + kwarg_only: true + name: maximize + type: bool - annotation: null + default: '{}' dynamic_type: at::Tensor - is_nullable: false - name: proj_bias - type: const at::Tensor & + is_nullable: true + kwarg_only: true + name: grad_scale + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: mask - type: const c10::optional & + kwarg_only: true + name: found_inf + type: const ::std::optional & - allocate: true annotation: a! - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false name: out output: true - type: at::Tensor & + type: at::TensorList method_of: - Type - namespace mode: native python_module: '' - returns: - - dynamic_type: at::Tensor - name: out - type: at::Tensor & + returns: [] inplace: false is_factory_method: false abstract: true @@ -196940,288 +209536,213 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _transformer_decoder_only_layer_fwd_out - operator_name: _transformer_decoder_only_layer_fwd - overload_name: out +- name: _fused_adamw + operator_name: _fused_adamw + overload_name: tensor_lr manual_kernel_registration: false category_override: '' - schema_string: aten::_transformer_decoder_only_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + schema_string: aten::_fused_adamw.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) arguments: - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out0 - output: true - type: at::Tensor & - - allocate: true - annotation: b! - dynamic_type: at::Tensor - is_nullable: false - name: out1 - output: true - type: at::Tensor & - - allocate: true - annotation: c! - dynamic_type: at::Tensor - is_nullable: false - name: out2 - output: true - type: at::Tensor & - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: src - type: const at::Tensor & + name: self + type: at::TensorList - annotation: null - dynamic_type: int64_t + dynamic_type: at::TensorList is_nullable: false - name: embed_dim - type: int64_t + name: grads + type: at::TensorList - annotation: null - dynamic_type: int64_t + dynamic_type: at::TensorList is_nullable: false - name: num_heads - type: int64_t + name: exp_avgs + type: at::TensorList - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: qkv_weight - type: const at::Tensor & + name: exp_avg_sqs + type: at::TensorList - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: qkv_bias - type: const at::Tensor & + name: max_exp_avg_sqs + type: at::TensorList - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: proj_weight - type: const at::Tensor & + name: state_steps + type: at::TensorList - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_bias + kwarg_only: true + name: lr type: const at::Tensor & - - annotation: null - dynamic_type: bool - is_nullable: false - name: use_gelu - type: bool - - annotation: null - dynamic_type: bool - is_nullable: false - name: norm_first - type: bool - annotation: null dynamic_type: double is_nullable: false - name: eps + kwarg_only: true + name: beta1 type: double - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: norm_weight_1 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: norm_bias_1 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: norm_weight_2 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: norm_bias_2 - type: const at::Tensor & + kwarg_only: true + name: beta2 + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: ffn_weight_1 - type: const at::Tensor & + kwarg_only: true + name: weight_decay + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: ffn_bias_1 - type: const at::Tensor & + kwarg_only: true + name: eps + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: ffn_weight_2 - type: const at::Tensor & + kwarg_only: true + name: amsgrad + type: bool - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: ffn_bias_2 - type: const at::Tensor & - - annotation: null - default: '{}' - dynamic_type: at::Tensor - is_nullable: true - name: mask - type: const c10::optional & + kwarg_only: true + name: maximize + type: bool - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_key - type: const 
c10::optional & + kwarg_only: true + name: grad_scale + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_value - type: const c10::optional & - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, double, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, at::Tensor &, at::Tensor &, at::Tensor &) + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional &, const ::std::optional &) schema_order_arguments: - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: src - type: const at::Tensor & + name: self + type: at::TensorList - annotation: null - dynamic_type: int64_t + dynamic_type: at::TensorList is_nullable: false - name: embed_dim - type: int64_t + name: grads + type: at::TensorList - annotation: null - dynamic_type: int64_t + dynamic_type: at::TensorList is_nullable: false - name: num_heads - type: int64_t + name: exp_avgs + type: at::TensorList - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: qkv_weight - type: const at::Tensor & + name: exp_avg_sqs + type: at::TensorList - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: qkv_bias - type: const at::Tensor & + name: max_exp_avg_sqs + type: at::TensorList - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: proj_weight - type: const at::Tensor & + name: state_steps + type: at::TensorList - annotation: null dynamic_type: at::Tensor is_nullable: false - name: proj_bias + kwarg_only: true + name: lr type: const at::Tensor & - - annotation: null - dynamic_type: bool - is_nullable: false - name: use_gelu - type: bool - - annotation: null - dynamic_type: bool - is_nullable: false - name: norm_first - type: bool - annotation: null dynamic_type: double is_nullable: false - name: eps + kwarg_only: true + name: beta1 type: double - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: norm_weight_1 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: norm_bias_1 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor - is_nullable: false - name: norm_weight_2 - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: norm_bias_2 - type: const at::Tensor & + kwarg_only: true + name: beta2 + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: ffn_weight_1 - type: const at::Tensor & + kwarg_only: true + name: weight_decay + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: ffn_bias_1 - type: const at::Tensor & + kwarg_only: true + name: eps + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: 
bool is_nullable: false - name: ffn_weight_2 - type: const at::Tensor & + kwarg_only: true + name: amsgrad + type: bool - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: ffn_bias_2 - type: const at::Tensor & - - annotation: null - default: '{}' - dynamic_type: at::Tensor - is_nullable: true - name: mask - type: const c10::optional & + kwarg_only: true + name: maximize + type: bool - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_key - type: const c10::optional & + kwarg_only: true + name: grad_scale + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_value - type: const c10::optional & - - allocate: true - annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out0 - output: true - type: at::Tensor & - - allocate: true - annotation: b! - dynamic_type: at::Tensor - is_nullable: false - name: out1 - output: true - type: at::Tensor & - - allocate: true - annotation: c! - dynamic_type: at::Tensor - is_nullable: false - name: out2 - output: true - type: at::Tensor & + kwarg_only: true + name: found_inf + type: const ::std::optional & method_of: - Type - namespace mode: native python_module: '' returns: - - dynamic_type: at::Tensor - name: out0 - type: at::Tensor & - - dynamic_type: at::Tensor - name: out1 - type: at::Tensor & - - dynamic_type: at::Tensor - name: out2 - type: at::Tensor & + - dynamic_type: at::TensorList + field_name: self_out + name: self_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: grads_out + name: grads_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avgs_out + name: exp_avgs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: exp_avg_sqs_out + name: exp_avg_sqs_out + type: ::std::vector + - dynamic_type: at::TensorList + field_name: max_exp_avg_sqs_out + name: max_exp_avg_sqs_out + type: ::std::vector inplace: false is_factory_method: false abstract: true @@ -197229,239 +209750,177 @@ with_gil: false deprecated: false has_math_kernel: false -- name: _native_decoder_only_multi_head_attention_out - operator_name: _native_decoder_only_multi_head_attention +- name: _fused_sgd_out + operator_name: _fused_sgd overload_name: out manual_kernel_registration: false category_override: '' - schema_string: aten::_native_decoder_only_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) + schema_string: aten::_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () arguments: - allocate: true annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out0 - output: true - type: at::Tensor & - - allocate: true - annotation: b! - dynamic_type: at::Tensor - is_nullable: false - name: out1 - output: true - type: at::Tensor & - - allocate: true - annotation: c! 
- dynamic_type: at::Tensor - is_nullable: false - name: out2 - output: true - type: at::Tensor & - - allocate: true - annotation: d! - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: out3 + name: out output: true - type: at::Tensor & + type: at::TensorList - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: query - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::TensorList is_nullable: false - name: key - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList is_nullable: false - name: value - type: const at::Tensor & + name: momentum_buffer_list + type: at::TensorList - annotation: null - dynamic_type: int64_t + dynamic_type: double is_nullable: false - name: embed_dim - type: int64_t + kwarg_only: true + name: weight_decay + type: double - annotation: null - dynamic_type: int64_t + dynamic_type: double is_nullable: false - name: num_head - type: int64_t + kwarg_only: true + name: momentum + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: qkv_weight - type: const at::Tensor & + kwarg_only: true + name: lr + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: qkv_bias - type: const at::Tensor & + kwarg_only: true + name: dampening + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: proj_weight - type: const at::Tensor & + kwarg_only: true + name: nesterov + type: bool - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: proj_bias - type: const at::Tensor & + kwarg_only: true + name: maximize + type: bool - annotation: null - default: '{}' - dynamic_type: at::Tensor - is_nullable: true - name: mask - type: const c10::optional & + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: is_first_step + type: bool - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_key - type: const c10::optional & + kwarg_only: true + name: grad_scale + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_value - type: const c10::optional & - - annotation: null - default: true - dynamic_type: bool - is_nullable: false - name: need_weights - type: bool - - annotation: null - default: true - dynamic_type: bool - is_nullable: false - name: average_attn_weights - type: bool - schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + kwarg_only: true + name: found_inf + type: const ::std::optional & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, bool, bool, const ::std::optional &, const ::std::optional &, at::TensorList) schema_order_arguments: - annotation: null - dynamic_type: at::Tensor + dynamic_type: at::TensorList is_nullable: false - name: query - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor + name: self + type: 
at::TensorList + - annotation: b! + dynamic_type: at::TensorList is_nullable: false - name: key - type: const at::Tensor & - - annotation: null - dynamic_type: at::Tensor + name: grads + type: at::TensorList + - annotation: c! + dynamic_type: at::TensorList is_nullable: false - name: value - type: const at::Tensor & + name: momentum_buffer_list + type: at::TensorList - annotation: null - dynamic_type: int64_t + dynamic_type: double is_nullable: false - name: embed_dim - type: int64_t + kwarg_only: true + name: weight_decay + type: double - annotation: null - dynamic_type: int64_t + dynamic_type: double is_nullable: false - name: num_head - type: int64_t + kwarg_only: true + name: momentum + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: qkv_weight - type: const at::Tensor & + kwarg_only: true + name: lr + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: double is_nullable: false - name: qkv_bias - type: const at::Tensor & + kwarg_only: true + name: dampening + type: double - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: proj_weight - type: const at::Tensor & + kwarg_only: true + name: nesterov + type: bool - annotation: null - dynamic_type: at::Tensor + dynamic_type: bool is_nullable: false - name: proj_bias - type: const at::Tensor & + kwarg_only: true + name: maximize + type: bool - annotation: null - default: '{}' - dynamic_type: at::Tensor - is_nullable: true - name: mask - type: const c10::optional & + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: is_first_step + type: bool - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_key - type: const c10::optional & + kwarg_only: true + name: grad_scale + type: const ::std::optional & - annotation: null default: '{}' dynamic_type: at::Tensor is_nullable: true - name: incr_value - type: const c10::optional & - - annotation: null - default: true - dynamic_type: bool - is_nullable: false - name: need_weights - type: bool - - annotation: null - default: true - dynamic_type: bool - is_nullable: false - name: average_attn_weights - type: bool + kwarg_only: true + name: found_inf + type: const ::std::optional & - allocate: true annotation: a! - dynamic_type: at::Tensor - is_nullable: false - name: out0 - output: true - type: at::Tensor & - - allocate: true - annotation: b! - dynamic_type: at::Tensor - is_nullable: false - name: out1 - output: true - type: at::Tensor & - - allocate: true - annotation: c! - dynamic_type: at::Tensor - is_nullable: false - name: out2 - output: true - type: at::Tensor & - - allocate: true - annotation: d! 
-    dynamic_type: at::Tensor
+    dynamic_type: at::TensorList
     is_nullable: false
-    name: out3
+    name: out
     output: true
-    type: at::Tensor &
+    type: at::TensorList
   method_of:
   - Type
   - namespace
   mode: native
   python_module: ''
-  returns:
-  - dynamic_type: at::Tensor
-    name: out0
-    type: at::Tensor &
-  - dynamic_type: at::Tensor
-    name: out1
-    type: at::Tensor &
-  - dynamic_type: at::Tensor
-    name: out2
-    type: at::Tensor &
-  - dynamic_type: at::Tensor
-    name: out3
-    type: at::Tensor &
+  returns: []
   inplace: false
   is_factory_method: false
   abstract: true
@@ -197469,86 +209928,175 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _foobar_out
-  operator_name: _foobar
-  overload_name: out
+- name: _fused_sgd
+  operator_name: _fused_sgd
+  overload_name: ''
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)
+  schema_string: aten::_fused_sgd(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
   arguments:
-  - allocate: true
-    annotation: a!
-    dynamic_type: at::Tensor
-    is_nullable: false
-    name: out
-    output: true
-    type: at::Tensor &
   - annotation: null
-    dynamic_type: at::Tensor
+    dynamic_type: at::TensorList
     is_nullable: false
     name: self
-    type: const at::Tensor &
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: grads
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: momentum_buffer_list
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    kwarg_only: true
+    name: weight_decay
+    type: double
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    kwarg_only: true
+    name: momentum
+    type: double
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    kwarg_only: true
+    name: lr
+    type: double
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    kwarg_only: true
+    name: dampening
+    type: double
   - annotation: null
-    default: true
     dynamic_type: bool
     is_nullable: false
-    name: arg1
+    kwarg_only: true
+    name: nesterov
     type: bool
   - annotation: null
-    default: true
     dynamic_type: bool
     is_nullable: false
-    name: arg2
+    kwarg_only: true
+    name: maximize
     type: bool
   - annotation: null
-    default: true
     dynamic_type: bool
     is_nullable: false
     kwarg_only: true
-    name: arg3
+    name: is_first_step
     type: bool
-  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, bool, bool, at::Tensor &)
-  schema_order_arguments:
   - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    kwarg_only: true
+    name: grad_scale
+    type: const ::std::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
     dynamic_type: at::Tensor
+    is_nullable: true
+    kwarg_only: true
+    name: found_inf
+    type: const ::std::optional<at::Tensor> &
+  schema_order_cpp_signature: ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> (at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, bool, bool, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::TensorList
     is_nullable: false
     name: self
-    type: const at::Tensor &
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: grads
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: momentum_buffer_list
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    kwarg_only: true
+    name: weight_decay
+    type: double
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    kwarg_only: true
+    name: momentum
+    type: double
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    kwarg_only: true
+    name: lr
+    type: double
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    kwarg_only: true
+    name: dampening
+    type: double
   - annotation: null
-    default: true
     dynamic_type: bool
     is_nullable: false
-    name: arg1
+    kwarg_only: true
+    name: nesterov
     type: bool
   - annotation: null
-    default: true
     dynamic_type: bool
     is_nullable: false
-    name: arg2
+    kwarg_only: true
+    name: maximize
     type: bool
   - annotation: null
-    default: true
     dynamic_type: bool
     is_nullable: false
     kwarg_only: true
-    name: arg3
+    name: is_first_step
     type: bool
-  - allocate: true
-    annotation: a!
+  - annotation: null
+    default: '{}'
     dynamic_type: at::Tensor
-    is_nullable: false
-    name: out
-    output: true
-    type: at::Tensor &
+    is_nullable: true
+    kwarg_only: true
+    name: grad_scale
+    type: const ::std::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    kwarg_only: true
+    name: found_inf
+    type: const ::std::optional<at::Tensor> &
   method_of:
   - Type
   - namespace
   mode: native
   python_module: ''
   returns:
-  - dynamic_type: at::Tensor
-    name: out
-    type: at::Tensor &
+  - dynamic_type: at::TensorList
+    field_name: self_out
+    name: self_out
+    type: ::std::vector<at::Tensor>
+  - dynamic_type: at::TensorList
+    field_name: grads_out
+    name: grads_out
+    type: ::std::vector<at::Tensor>
+  - dynamic_type: at::TensorList
+    field_name: momentum_buffer_list_out
+    name: momentum_buffer_list_out
+    type: ::std::vector<at::Tensor>
   inplace: false
   is_factory_method: false
   abstract: true
@@ -197556,12 +210104,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _fused_adam_out
-  operator_name: _fused_adam
-  overload_name: out
+- name: _fused_sgd_out
+  operator_name: _fused_sgd
+  overload_name: tensor_lr_out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+  schema_string: aten::_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -197583,64 +210131,49 @@
   - annotation: c!
     dynamic_type: at::TensorList
     is_nullable: false
-    name: exp_avgs
-    type: at::TensorList
-  - annotation: d!
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: exp_avg_sqs
-    type: at::TensorList
-  - annotation: e!
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: max_exp_avg_sqs
-    type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: state_steps
+    name: momentum_buffer_list
     type: at::TensorList
   - annotation: null
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: lr
+    name: weight_decay
     type: double
   - annotation: null
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: beta1
+    name: momentum
     type: double
   - annotation: null
-    dynamic_type: double
+    dynamic_type: at::Tensor
     is_nullable: false
     kwarg_only: true
-    name: beta2
-    type: double
+    name: lr
+    type: const at::Tensor &
   - annotation: null
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: weight_decay
+    name: dampening
     type: double
   - annotation: null
-    dynamic_type: double
+    dynamic_type: bool
    is_nullable: false
     kwarg_only: true
-    name: eps
-    type: double
+    name: nesterov
+    type: bool
   - annotation: null
     dynamic_type: bool
     is_nullable: false
     kwarg_only: true
-    name: amsgrad
+    name: maximize
     type: bool
   - annotation: null
     dynamic_type: bool
     is_nullable: false
     kwarg_only: true
-    name: maximize
+    name: is_first_step
     type: bool
   - annotation: null
     default: '{}'
@@ -197648,15 +210181,15 @@
     is_nullable: true
     kwarg_only: true
     name: grad_scale
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: found_inf
-    type: const c10::optional<at::Tensor> &
-  schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, at::TensorList)
+    type: const ::std::optional<at::Tensor> &
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, double, double, const at::Tensor &, double, bool, bool, bool, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, at::TensorList)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
@@ -197671,64 +210204,49 @@
   - annotation: c!
     dynamic_type: at::TensorList
     is_nullable: false
-    name: exp_avgs
-    type: at::TensorList
-  - annotation: d!
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: exp_avg_sqs
-    type: at::TensorList
-  - annotation: e!
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: max_exp_avg_sqs
-    type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: state_steps
+    name: momentum_buffer_list
     type: at::TensorList
   - annotation: null
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: lr
+    name: weight_decay
     type: double
   - annotation: null
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: beta1
+    name: momentum
     type: double
   - annotation: null
-    dynamic_type: double
+    dynamic_type: at::Tensor
     is_nullable: false
     kwarg_only: true
-    name: beta2
-    type: double
+    name: lr
+    type: const at::Tensor &
   - annotation: null
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: weight_decay
+    name: dampening
     type: double
   - annotation: null
-    dynamic_type: double
+    dynamic_type: bool
     is_nullable: false
     kwarg_only: true
-    name: eps
-    type: double
+    name: nesterov
+    type: bool
   - annotation: null
     dynamic_type: bool
     is_nullable: false
     kwarg_only: true
-    name: amsgrad
+    name: maximize
     type: bool
   - annotation: null
     dynamic_type: bool
     is_nullable: false
     kwarg_only: true
-    name: maximize
+    name: is_first_step
     type: bool
   - annotation: null
     default: '{}'
@@ -197736,14 +210254,14 @@
     is_nullable: true
     kwarg_only: true
     name: grad_scale
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: found_inf
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - allocate: true
     annotation: a!
     dynamic_type: at::TensorList
@@ -197764,12 +210282,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _fused_adam
-  operator_name: _fused_adam
-  overload_name: ''
+- name: _fused_sgd
+  operator_name: _fused_sgd
+  overload_name: tensor_lr
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
+  schema_string: aten::_fused_sgd.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
   arguments:
   - annotation: null
     dynamic_type: at::TensorList
@@ -197784,64 +210302,49 @@
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: exp_avgs
-    type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: exp_avg_sqs
-    type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: max_exp_avg_sqs
-    type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: state_steps
+    name: momentum_buffer_list
     type: at::TensorList
   - annotation: null
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: lr
+    name: weight_decay
     type: double
   - annotation: null
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: beta1
+    name: momentum
     type: double
   - annotation: null
-    dynamic_type: double
+    dynamic_type: at::Tensor
     is_nullable: false
     kwarg_only: true
-    name: beta2
-    type: double
+    name: lr
+    type: const at::Tensor &
   - annotation: null
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: weight_decay
+    name: dampening
     type: double
   - annotation: null
-    dynamic_type: double
+    dynamic_type: bool
     is_nullable: false
     kwarg_only: true
-    name: eps
-    type: double
+    name: nesterov
+    type: bool
   - annotation: null
     dynamic_type: bool
     is_nullable: false
     kwarg_only: true
-    name: amsgrad
+    name: maximize
     type: bool
   - annotation: null
     dynamic_type: bool
     is_nullable: false
     kwarg_only: true
-    name: maximize
+    name: is_first_step
     type: bool
   - annotation: null
     default: '{}'
@@ -197849,15 +210352,15 @@
     is_nullable: true
     kwarg_only: true
     name: grad_scale
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: found_inf
-    type: const c10::optional<at::Tensor> &
-  schema_order_cpp_signature: ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &)
+    type: const ::std::optional<at::Tensor> &
+  schema_order_cpp_signature: ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> (at::TensorList, at::TensorList, at::TensorList, double, double, const at::Tensor &, double, bool, bool, bool, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
@@ -197872,64 +210375,49 @@
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: exp_avgs
-    type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: exp_avg_sqs
-    type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: max_exp_avg_sqs
-    type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: state_steps
+    name: momentum_buffer_list
     type: at::TensorList
   - annotation: null
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: lr
+    name: weight_decay
     type: double
   - annotation: null
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: beta1
+    name: momentum
     type: double
   - annotation: null
-    dynamic_type: double
+    dynamic_type: at::Tensor
     is_nullable: false
     kwarg_only: true
-    name: beta2
-    type: double
+    name: lr
+    type: const at::Tensor &
   - annotation: null
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: weight_decay
+    name: dampening
     type: double
   - annotation: null
-    dynamic_type: double
+    dynamic_type: bool
     is_nullable: false
     kwarg_only: true
-    name: eps
-    type: double
+    name: nesterov
+    type: bool
   - annotation: null
     dynamic_type: bool
     is_nullable: false
     kwarg_only: true
-    name: amsgrad
+    name: maximize
     type: bool
   - annotation: null
     dynamic_type: bool
     is_nullable: false
     kwarg_only: true
-    name: maximize
+    name: is_first_step
     type: bool
   - annotation: null
     default: '{}'
@@ -197937,14 +210425,14 @@
     is_nullable: true
     kwarg_only: true
     name: grad_scale
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: found_inf
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   method_of:
   - Type
   - namespace
@@ -197960,16 +210448,8 @@
     name: grads_out
     type: ::std::vector<at::Tensor>
   - dynamic_type: at::TensorList
-    field_name: exp_avgs_out
-    name: exp_avgs_out
-    type: ::std::vector<at::Tensor>
-  - dynamic_type: at::TensorList
-    field_name: exp_avg_sqs_out
-    name: exp_avg_sqs_out
-    type: ::std::vector<at::Tensor>
-  - dynamic_type: at::TensorList
-    field_name: max_exp_avg_sqs_out
-    name: max_exp_avg_sqs_out
+    field_name: momentum_buffer_list_out
+    name: momentum_buffer_list_out
     type: ::std::vector<at::Tensor>
   inplace: false
   is_factory_method: false
@@ -197978,12 +210458,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _fused_adamw_out
-  operator_name: _fused_adamw
+- name: _fused_adagrad_out
+  operator_name: _fused_adagrad
   overload_name: out
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+  schema_string: aten::_fused_adagrad.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] state_sums, Tensor(d!)[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
   arguments:
   - allocate: true
     annotation: a!
@@ -198005,19 +210485,9 @@
   - annotation: c!
     dynamic_type: at::TensorList
     is_nullable: false
-    name: exp_avgs
+    name: state_sums
     type: at::TensorList
   - annotation: d!
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: exp_avg_sqs
-    type: at::TensorList
-  - annotation: e!
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: max_exp_avg_sqs
-    type: at::TensorList
-  - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
     name: state_steps
@@ -198032,13 +210502,7 @@
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: beta1
-    type: double
-  - annotation: null
-    dynamic_type: double
-    is_nullable: false
-    kwarg_only: true
-    name: beta2
+    name: lr_decay
     type: double
   - annotation: null
     dynamic_type: double
@@ -198052,12 +210516,6 @@
     kwarg_only: true
     name: eps
     type: double
-  - annotation: null
-    dynamic_type: bool
-    is_nullable: false
-    kwarg_only: true
-    name: amsgrad
-    type: bool
   - annotation: null
     dynamic_type: bool
     is_nullable: false
@@ -198070,15 +210528,15 @@
     is_nullable: true
     kwarg_only: true
     name: grad_scale
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: found_inf
-    type: const c10::optional<at::Tensor> &
-  schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, at::TensorList)
+    type: const ::std::optional<at::Tensor> &
+  schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, at::TensorList)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
@@ -198093,19 +210551,9 @@
   - annotation: c!
     dynamic_type: at::TensorList
     is_nullable: false
-    name: exp_avgs
+    name: state_sums
     type: at::TensorList
   - annotation: d!
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: exp_avg_sqs
-    type: at::TensorList
-  - annotation: e!
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: max_exp_avg_sqs
-    type: at::TensorList
-  - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
     name: state_steps
@@ -198120,13 +210568,7 @@
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: beta1
-    type: double
-  - annotation: null
-    dynamic_type: double
-    is_nullable: false
-    kwarg_only: true
-    name: beta2
+    name: lr_decay
     type: double
   - annotation: null
     dynamic_type: double
@@ -198140,12 +210582,6 @@
     kwarg_only: true
     name: eps
     type: double
-  - annotation: null
-    dynamic_type: bool
-    is_nullable: false
-    kwarg_only: true
-    name: amsgrad
-    type: bool
   - annotation: null
     dynamic_type: bool
     is_nullable: false
@@ -198158,14 +210594,14 @@
     is_nullable: true
     kwarg_only: true
     name: grad_scale
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: found_inf
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - allocate: true
     annotation: a!
     dynamic_type: at::TensorList
@@ -198186,12 +210622,12 @@
   with_gil: false
   deprecated: false
   has_math_kernel: false
-- name: _fused_adamw
-  operator_name: _fused_adamw
+- name: _fused_adagrad
+  operator_name: _fused_adagrad
   overload_name: ''
   manual_kernel_registration: false
   category_override: ''
-  schema_string: aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
+  schema_string: aten::_fused_adagrad(Tensor[] self, Tensor[] grads, Tensor[] state_sums, Tensor[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] state_sums_out, Tensor[] state_steps_out)
   arguments:
   - annotation: null
     dynamic_type: at::TensorList
@@ -198206,17 +210642,7 @@
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: exp_avgs
-    type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: exp_avg_sqs
-    type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: max_exp_avg_sqs
+    name: state_sums
     type: at::TensorList
   - annotation: null
     dynamic_type: at::TensorList
@@ -198233,13 +210659,7 @@
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: beta1
-    type: double
-  - annotation: null
-    dynamic_type: double
-    is_nullable: false
-    kwarg_only: true
-    name: beta2
+    name: lr_decay
     type: double
   - annotation: null
     dynamic_type: double
@@ -198253,12 +210673,6 @@
     kwarg_only: true
     name: eps
     type: double
-  - annotation: null
-    dynamic_type: bool
-    is_nullable: false
-    kwarg_only: true
-    name: amsgrad
-    type: bool
   - annotation: null
     dynamic_type: bool
     is_nullable: false
@@ -198271,15 +210685,15 @@
     is_nullable: true
     kwarg_only: true
     name: grad_scale
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: found_inf
-    type: const c10::optional<at::Tensor> &
-  schema_order_cpp_signature: ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &)
+    type: const ::std::optional<at::Tensor> &
+  schema_order_cpp_signature: ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &)
   schema_order_arguments:
   - annotation: null
     dynamic_type: at::TensorList
@@ -198294,17 +210708,7 @@
   - annotation: null
     dynamic_type: at::TensorList
     is_nullable: false
-    name: exp_avgs
-    type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: exp_avg_sqs
-    type: at::TensorList
-  - annotation: null
-    dynamic_type: at::TensorList
-    is_nullable: false
-    name: max_exp_avg_sqs
+    name: state_sums
     type: at::TensorList
   - annotation: null
     dynamic_type: at::TensorList
@@ -198321,13 +210725,7 @@
     dynamic_type: double
     is_nullable: false
     kwarg_only: true
-    name: beta1
-    type: double
-  - annotation: null
-    dynamic_type: double
-    is_nullable: false
-    kwarg_only: true
-    name: beta2
+    name: lr_decay
     type: double
   - annotation: null
     dynamic_type: double
@@ -198341,12 +210739,6 @@
     kwarg_only: true
     name: eps
     type: double
-  - annotation: null
-    dynamic_type: bool
-    is_nullable: false
-    kwarg_only: true
-    name: amsgrad
-    type: bool
   - annotation: null
     dynamic_type: bool
     is_nullable: false
@@ -198359,14 +210751,14 @@
     is_nullable: true
     kwarg_only: true
     name: grad_scale
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   - annotation: null
     default: '{}'
     dynamic_type: at::Tensor
     is_nullable: true
     kwarg_only: true
     name: found_inf
-    type: const c10::optional<at::Tensor> &
+    type: const ::std::optional<at::Tensor> &
   method_of:
   - Type
   - namespace
@@ -198382,16 +210774,12 @@
     name: grads_out
     type: ::std::vector<at::Tensor>
   - dynamic_type: at::TensorList
-    field_name: exp_avgs_out
-    name: exp_avgs_out
-    type: ::std::vector<at::Tensor>
-  - dynamic_type: at::TensorList
-    field_name: exp_avg_sqs_out
-    name: exp_avg_sqs_out
+    field_name: state_sums_out
+    name: state_sums_out
     type: ::std::vector<at::Tensor>
   - dynamic_type: at::TensorList
-    field_name: max_exp_avg_sqs_out
-    name: max_exp_avg_sqs_out
+    field_name: state_steps_out
+    name: state_steps_out
     type: ::std::vector<at::Tensor>
   inplace: false
   is_factory_method: false
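
The hunks above swap the vendored declarations for the _fused_adam/_fused_adamw out-variants for the newer _fused_sgd and _fused_adagrad operator families. For orientation, here is a minimal sketch of how the functional aten::_fused_sgd schema declared above maps onto a libtorch call. This example is illustrative only and not part of the diff; it assumes a libtorch build recent enough to generate this binding (roughly 2.3+), and the exact header layout may differ across versions.

// Illustrative sketch only -- not part of the vendored diff. Assumes a
// libtorch that generates at::_fused_sgd from the schema above.
#include <ATen/ATen.h>
#include <tuple>
#include <vector>

int main() {
  // One parameter, its gradient, and a momentum buffer, passed as TensorLists.
  std::vector<at::Tensor> params  = {at::randn({4})};
  std::vector<at::Tensor> grads   = {at::randn({4})};
  std::vector<at::Tensor> momenta = {at::zeros({4})};

  // Everything after '*' in the schema is keyword-only in Python but becomes
  // a plain positional argument in the generated C++ signature (see
  // schema_order_cpp_signature above); grad_scale/found_inf default to {}.
  auto outs = at::_fused_sgd(
      params, grads, momenta,
      /*weight_decay=*/0.0, /*momentum=*/0.9, /*lr=*/0.01,
      /*dampening=*/0.0, /*nesterov=*/false, /*maximize=*/false,
      /*is_first_step=*/true);

  // The functional overload returns updated copies (self_out, grads_out,
  // momentum_buffer_list_out) rather than mutating its inputs in place.
  std::vector<at::Tensor> self_out = std::get<0>(outs);
  return 0;
}

The _fused_adagrad declarations follow the same pattern, with state_sums and state_steps taking the place of the momentum buffers.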